from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any, Type, TypeVar

from huggingface_hub import HfApi
from huggingface_hub.utils import validate_hf_hub_args

T = TypeVar("T", bound="HubMixin")


class HubMixin:
    """
    A Mixin containing the functionality to push an object to the hub.

    This is similar to huggingface_hub.ModelHubMixin but is lighter and makes fewer assumptions about its
    subclasses (in particular, the fact that it's not necessarily a model). The inheriting classes must
    implement '_save_pretrained' and 'from_pretrained'.
    """

    def save_pretrained(
        self,
        save_directory: str | Path,
        *,
        repo_id: str | None = None,
        push_to_hub: bool = False,
        card_kwargs: dict[str, Any] | None = None,
        **push_to_hub_kwargs,
    ) -> str | None:
        """
        Save the object in a local directory.

        Args:
            save_directory (`str` or `Path`):
                Path to directory in which the object will be saved.
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your object to the Hugging Face Hub after saving it.
            repo_id (`str`, *optional*):
                ID of your repository on the Hub. Used only if `push_to_hub=True`. Will default to the folder name
                if not provided.
            card_kwargs (`Dict[str, Any]`, *optional*):
                Additional arguments passed to the card template to customize the card.
            push_to_hub_kwargs:
                Additional keyword arguments passed along to the [`~HubMixin.push_to_hub`] method.

        Returns:
            `str` or `None`: url of the commit on the Hub if `push_to_hub=True`, `None` otherwise.
        """
        save_directory = Path(save_directory)
        save_directory.mkdir(parents=True, exist_ok=True)

        # save object (weights, files, etc.)
        self._save_pretrained(save_directory)

        # push to the Hub if required
        if push_to_hub:
            if repo_id is None:
                repo_id = save_directory.name  # Defaults to `save_directory` name
            return self.push_to_hub(repo_id=repo_id, card_kwargs=card_kwargs, **push_to_hub_kwargs)
        return None

    def _save_pretrained(self, save_directory: Path) -> None:
        """
        Overwrite this method in subclass to define how to save your object.

        Args:
            save_directory (`str` or `Path`): Path to directory in which the object files will be saved.
        """
        raise NotImplementedError

    @classmethod
    @validate_hf_hub_args
    def from_pretrained(
        cls: Type[T],
        pretrained_name_or_path: str | Path,
        *,
        force_download: bool = False,
        resume_download: bool | None = None,
        proxies: dict | None = None,
        token: str | bool | None = None,
        cache_dir: str | Path | None = None,
        local_files_only: bool = False,
        revision: str | None = None,
        **kwargs,
    ) -> T:
        """
        Download the object from the Hugging Face Hub and instantiate it.

        Args:
            pretrained_name_or_path (`str`, `Path`):
                - Either the `repo_id` (string) of the object hosted on the Hub, e.g. `lerobot/diffusion_pusht`.
                - Or a path to a `directory` containing the object files saved using `.save_pretrained`,
                  e.g., `../path/to/my_model_directory/`.
            revision (`str`, *optional*):
                Revision on the Hub. Can be a branch name, a git tag or any commit id. Defaults to the latest commit
                on `main` branch.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether to force (re-)downloading the files from the Hub, overriding the existing cache.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g.,
                `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on every request.
            token (`str` or `bool`, *optional*):
                The token to use as HTTP bearer authorization for remote files. By default, it will use the token
                cached when running `huggingface-cli login`.
            cache_dir (`str`, `Path`, *optional*):
                Path to the folder where cached files are stored.
            local_files_only (`bool`, *optional*, defaults to `False`):
                If `True`, avoid downloading the file and return the path to the local cached file if it exists.
            kwargs (`Dict`, *optional*):
                Additional kwargs to pass to the object during initialization.
        """
        raise NotImplementedError

    @validate_hf_hub_args
    def push_to_hub(
        self,
        repo_id: str,
        *,
        commit_message: str | None = None,
        private: bool | None = None,
        token: str | None = None,
        branch: str | None = None,
        create_pr: bool | None = None,
        allow_patterns: list[str] | str | None = None,
        ignore_patterns: list[str] | str | None = None,
        delete_patterns: list[str] | str | None = None,
        card_kwargs: dict[str, Any] | None = None,
    ) -> str:
        """
        Upload model checkpoint to the Hub.

        Use `allow_patterns` and `ignore_patterns` to precisely filter which files should be pushed to the hub.
        Use `delete_patterns` to delete existing remote files in the same commit. See [`upload_folder`] reference
        for more details.

        Args:
            repo_id (`str`):
                ID of the repository to push to (example: `"username/my-model"`).
            commit_message (`str`, *optional*):
                Message to commit while pushing.
            private (`bool`, *optional*):
                Whether the repository created should be private.
                If `None` (default), the repo will be public unless the organization's default is private.
            token (`str`, *optional*):
                The token to use as HTTP bearer authorization for remote files. By default, it will use the token
                cached when running `huggingface-cli login`.
            branch (`str`, *optional*):
                The git branch on which to push the model. This defaults to `"main"`.
            create_pr (`boolean`, *optional*):
                Whether or not to create a Pull Request from `branch` with that commit. Defaults to `False`.
            allow_patterns (`List[str]` or `str`, *optional*):
                If provided, only files matching at least one pattern are pushed.
            ignore_patterns (`List[str]` or `str`, *optional*):
                If provided, files matching any of the patterns are not pushed.
            delete_patterns (`List[str]` or `str`, *optional*):
                If provided, remote files matching any of the patterns will be deleted from the repo.
            card_kwargs (`Dict[str, Any]`, *optional*):
                Additional arguments passed to the card template to customize the card.

        Returns:
            The url of the commit of your object in the given repository.
        """
        api = HfApi(token=token)
        repo_id = api.create_repo(repo_id=repo_id, private=private, exist_ok=True).repo_id

        if commit_message is None:
            if "Policy" in self.__class__.__name__:
                commit_message = "Upload policy"
            elif "Config" in self.__class__.__name__:
                commit_message = "Upload config"
            else:
                commit_message = f"Upload {self.__class__.__name__}"

        # Push the files to the repo in a single commit
        with TemporaryDirectory(ignore_cleanup_errors=True) as tmp:
            saved_path = Path(tmp) / repo_id
            self.save_pretrained(saved_path, card_kwargs=card_kwargs)
            return api.upload_folder(
                repo_id=repo_id,
                repo_type="model",
                folder_path=saved_path,
                commit_message=commit_message,
                revision=branch,
                create_pr=create_pr,
                allow_patterns=allow_patterns,
                ignore_patterns=ignore_patterns,
                delete_patterns=delete_patterns,
            )
lerobot/lerobot/common/utils/hub.py/0
{ "file_path": "lerobot/lerobot/common/utils/hub.py", "repo_id": "lerobot", "token_count": 3654 }
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Use this script to convert your dataset into LeRobot dataset format and upload it to the Hugging Face hub, or store it locally. LeRobot dataset format is lightweight, fast to load from, and does not require any installation of neural net specific packages like pytorch, tensorflow, jax. Example of how to download raw datasets, convert them into LeRobotDataset format, and push them to the hub: ``` python lerobot/scripts/push_dataset_to_hub.py \ --raw-dir data/pusht_raw \ --raw-format pusht_zarr \ --repo-id lerobot/pusht python lerobot/scripts/push_dataset_to_hub.py \ --raw-dir data/xarm_lift_medium_raw \ --raw-format xarm_pkl \ --repo-id lerobot/xarm_lift_medium python lerobot/scripts/push_dataset_to_hub.py \ --raw-dir data/aloha_sim_insertion_scripted_raw \ --raw-format aloha_hdf5 \ --repo-id lerobot/aloha_sim_insertion_scripted python lerobot/scripts/push_dataset_to_hub.py \ --raw-dir data/umi_cup_in_the_wild_raw \ --raw-format umi_zarr \ --repo-id lerobot/umi_cup_in_the_wild ``` """ import argparse import json import shutil import warnings from pathlib import Path from typing import Any import torch from huggingface_hub import HfApi from safetensors.torch import save_file from lerobot.common.datasets.compute_stats import compute_stats from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset from lerobot.common.datasets.push_dataset_to_hub.utils import check_repo_id from lerobot.common.datasets.utils import create_branch, create_lerobot_dataset_card, flatten_dict def get_from_raw_to_lerobot_format_fn(raw_format: str): if raw_format == "pusht_zarr": from lerobot.common.datasets.push_dataset_to_hub.pusht_zarr_format import from_raw_to_lerobot_format elif raw_format == "umi_zarr": from lerobot.common.datasets.push_dataset_to_hub.umi_zarr_format import from_raw_to_lerobot_format elif raw_format == "aloha_hdf5": from lerobot.common.datasets.push_dataset_to_hub.aloha_hdf5_format import from_raw_to_lerobot_format elif raw_format in ["rlds", "openx"]: from lerobot.common.datasets.push_dataset_to_hub.openx_rlds_format import from_raw_to_lerobot_format elif raw_format == "dora_parquet": from lerobot.common.datasets.push_dataset_to_hub.dora_parquet_format import from_raw_to_lerobot_format elif raw_format == "xarm_pkl": from lerobot.common.datasets.push_dataset_to_hub.xarm_pkl_format import from_raw_to_lerobot_format elif raw_format == "cam_png": from lerobot.common.datasets.push_dataset_to_hub.cam_png_format import from_raw_to_lerobot_format else: raise ValueError( f"The selected {raw_format} can't be found. Did you add it to `lerobot/scripts/push_dataset_to_hub.py::get_from_raw_to_lerobot_format_fn`?" 
) return from_raw_to_lerobot_format def save_meta_data( info: dict[str, Any], stats: dict, episode_data_index: dict[str, list], meta_data_dir: Path ): meta_data_dir.mkdir(parents=True, exist_ok=True) # save info info_path = meta_data_dir / "info.json" with open(str(info_path), "w") as f: json.dump(info, f, indent=4) # save stats stats_path = meta_data_dir / "stats.safetensors" save_file(flatten_dict(stats), stats_path) # save episode_data_index episode_data_index = {key: torch.tensor(episode_data_index[key]) for key in episode_data_index} ep_data_idx_path = meta_data_dir / "episode_data_index.safetensors" save_file(episode_data_index, ep_data_idx_path) def push_meta_data_to_hub(repo_id: str, meta_data_dir: str | Path, revision: str | None): """Expect all meta data files to be all stored in a single "meta_data" directory. On the hugging face repositery, they will be uploaded in a "meta_data" directory at the root. """ api = HfApi() api.upload_folder( folder_path=meta_data_dir, path_in_repo="meta_data", repo_id=repo_id, revision=revision, repo_type="dataset", ) def push_dataset_card_to_hub( repo_id: str, revision: str | None, tags: list | None = None, license: str = "apache-2.0", **card_kwargs, ): """Creates and pushes a LeRobotDataset Card with appropriate tags to easily find it on the hub.""" card = create_lerobot_dataset_card(tags=tags, license=license, **card_kwargs) card.push_to_hub(repo_id=repo_id, repo_type="dataset", revision=revision) def push_videos_to_hub(repo_id: str, videos_dir: str | Path, revision: str | None): """Expect mp4 files to be all stored in a single "videos" directory. On the hugging face repositery, they will be uploaded in a "videos" directory at the root. """ api = HfApi() api.upload_folder( folder_path=videos_dir, path_in_repo="videos", repo_id=repo_id, revision=revision, repo_type="dataset", allow_patterns="*.mp4", ) def push_dataset_to_hub( raw_dir: Path, raw_format: str, repo_id: str, push_to_hub: bool = True, local_dir: Path | None = None, fps: int | None = None, video: bool = True, batch_size: int = 32, num_workers: int = 8, episodes: list[int] | None = None, force_override: bool = False, resume: bool = False, cache_dir: Path = Path("/tmp"), tests_data_dir: Path | None = None, encoding: dict | None = None, ): check_repo_id(repo_id) user_id, dataset_id = repo_id.split("/") # Robustify when `raw_dir` is str instead of Path raw_dir = Path(raw_dir) if not raw_dir.exists(): raise NotADirectoryError( f"{raw_dir} does not exists. Check your paths or run this command to download an existing raw dataset on the hub: " f"`python lerobot/common/datasets/push_dataset_to_hub/_download_raw.py --raw-dir your/raw/dir --repo-id your/repo/id_raw`" ) if local_dir: # Robustify when `local_dir` is str instead of Path local_dir = Path(local_dir) # Send warning if local_dir isn't well formated if local_dir.parts[-2] != user_id or local_dir.parts[-1] != dataset_id: warnings.warn( f"`local_dir` ({local_dir}) doesn't contain a community or user id `/` the name of the dataset that match the `repo_id` (e.g. 'data/lerobot/pusht'). Following this naming convention is advised, but not mandatory.", stacklevel=1, ) # Check we don't override an existing `local_dir` by mistake if local_dir.exists(): if force_override: shutil.rmtree(local_dir) elif not resume: raise ValueError(f"`local_dir` already exists ({local_dir}). 
Use `--force-override 1`.") meta_data_dir = local_dir / "meta_data" videos_dir = local_dir / "videos" else: # Temporary directory used to store images, videos, meta_data meta_data_dir = Path(cache_dir) / "meta_data" videos_dir = Path(cache_dir) / "videos" if raw_format is None: # TODO(rcadene, adilzouitine): implement auto_find_raw_format raise NotImplementedError() # raw_format = auto_find_raw_format(raw_dir) # convert dataset from original raw format to LeRobot format from_raw_to_lerobot_format = get_from_raw_to_lerobot_format_fn(raw_format) hf_dataset, episode_data_index, info = from_raw_to_lerobot_format( raw_dir, videos_dir, fps, video, episodes, encoding, ) lerobot_dataset = LeRobotDataset.from_preloaded( repo_id=repo_id, hf_dataset=hf_dataset, episode_data_index=episode_data_index, info=info, videos_dir=videos_dir, ) stats = compute_stats(lerobot_dataset, batch_size, num_workers) if local_dir: hf_dataset = hf_dataset.with_format(None) # to remove transforms that cant be saved hf_dataset.save_to_disk(str(local_dir / "train")) if push_to_hub or local_dir: # mandatory for upload save_meta_data(info, stats, episode_data_index, meta_data_dir) if push_to_hub: hf_dataset.push_to_hub(repo_id, revision="main") push_meta_data_to_hub(repo_id, meta_data_dir, revision="main") push_dataset_card_to_hub(repo_id, revision="main") if video: push_videos_to_hub(repo_id, videos_dir, revision="main") create_branch(repo_id, repo_type="dataset", branch=CODEBASE_VERSION) if tests_data_dir: # get the first episode num_items_first_ep = episode_data_index["to"][0] - episode_data_index["from"][0] test_hf_dataset = hf_dataset.select(range(num_items_first_ep)) episode_data_index = {k: v[:1] for k, v in episode_data_index.items()} test_hf_dataset = test_hf_dataset.with_format(None) test_hf_dataset.save_to_disk(str(tests_data_dir / repo_id / "train")) tests_meta_data = tests_data_dir / repo_id / "meta_data" save_meta_data(info, stats, episode_data_index, tests_meta_data) # copy videos of first episode to tests directory episode_index = 0 tests_videos_dir = tests_data_dir / repo_id / "videos" tests_videos_dir.mkdir(parents=True, exist_ok=True) for key in lerobot_dataset.camera_keys: fname = f"{key}_episode_{episode_index:06d}.mp4" shutil.copy(videos_dir / fname, tests_videos_dir / fname) if local_dir is None: # clear cache shutil.rmtree(meta_data_dir) shutil.rmtree(videos_dir) return lerobot_dataset def main(): parser = argparse.ArgumentParser() parser.add_argument( "--raw-dir", type=Path, required=True, help="Directory containing input raw datasets (e.g. `data/aloha_mobile_chair_raw` or `data/pusht_raw).", ) # TODO(rcadene): add automatic detection of the format parser.add_argument( "--raw-format", type=str, required=True, help="Dataset type (e.g. `pusht_zarr`, `umi_zarr`, `aloha_hdf5`, `xarm_pkl`, `dora_parquet`, `rlds`, `openx`).", ) parser.add_argument( "--repo-id", type=str, required=True, help="Repositery identifier on Hugging Face: a community or a user name `/` the name of the dataset (e.g. `lerobot/pusht`, `cadene/aloha_sim_insertion_human`).", ) parser.add_argument( "--local-dir", type=Path, help="When provided, writes the dataset converted to LeRobotDataset format in this directory (e.g. `data/lerobot/aloha_mobile_chair`).", ) parser.add_argument( "--push-to-hub", type=int, default=1, help="Upload to hub.", ) parser.add_argument( "--fps", type=int, help="Frame rate used to collect videos. 
If not provided, use the default one specified in the code.", ) parser.add_argument( "--video", type=int, default=1, help="Convert each episode of the raw dataset to an mp4 video. This option allows 60 times lower disk space consumption and 25 faster loading time during training.", ) parser.add_argument( "--batch-size", type=int, default=32, help="Batch size loaded by DataLoader for computing the dataset statistics.", ) parser.add_argument( "--num-workers", type=int, default=8, help="Number of processes of Dataloader for computing the dataset statistics.", ) parser.add_argument( "--episodes", type=int, nargs="*", help="When provided, only converts the provided episodes (e.g `--episodes 2 3 4`). Useful to test the code on 1 episode.", ) parser.add_argument( "--force-override", type=int, default=0, help="When set to 1, removes provided output directory if it already exists. By default, raises a ValueError exception.", ) parser.add_argument( "--resume", type=int, default=0, help="When set to 1, resumes a previous run.", ) parser.add_argument( "--cache-dir", type=Path, required=False, default="/tmp", help="Directory to store the temporary videos and images generated while creating the dataset.", ) parser.add_argument( "--tests-data-dir", type=Path, help=( "When provided, save tests artifacts into the given directory " "(e.g. `--tests-data-dir tests/data` will save to tests/data/{--repo-id})." ), ) args = parser.parse_args() push_dataset_to_hub(**vars(args)) if __name__ == "__main__": main()
lerobot/lerobot/scripts/push_dataset_to_hub.py/0
{ "file_path": "lerobot/lerobot/scripts/push_dataset_to_hub.py", "repo_id": "lerobot", "token_count": 5655 }
from functools import cache

import numpy as np

CAP_PROP_FPS = 5
CAP_PROP_FRAME_WIDTH = 3
CAP_PROP_FRAME_HEIGHT = 4
COLOR_RGB2BGR = 4
COLOR_BGR2RGB = 4
ROTATE_90_COUNTERCLOCKWISE = 2
ROTATE_90_CLOCKWISE = 0
ROTATE_180 = 1


@cache
def _generate_image(width: int, height: int):
    return np.random.randint(0, 256, size=(height, width, 3), dtype=np.uint8)


def cvtColor(color_image, color_convertion):  # noqa: N802
    if color_convertion in [COLOR_RGB2BGR, COLOR_BGR2RGB]:
        return color_image[:, :, [2, 1, 0]]
    else:
        raise NotImplementedError(color_convertion)


def rotate(color_image, rotation):
    if rotation is None:
        return color_image
    elif rotation == ROTATE_90_CLOCKWISE:
        return np.rot90(color_image, k=1)
    elif rotation == ROTATE_180:
        return np.rot90(color_image, k=2)
    elif rotation == ROTATE_90_COUNTERCLOCKWISE:
        return np.rot90(color_image, k=3)
    else:
        raise NotImplementedError(rotation)


class VideoCapture:
    def __init__(self, *args, **kwargs):
        self._mock_dict = {
            CAP_PROP_FPS: 30,
            CAP_PROP_FRAME_WIDTH: 640,
            CAP_PROP_FRAME_HEIGHT: 480,
        }
        self._is_opened = True

    def isOpened(self):  # noqa: N802
        return self._is_opened

    def set(self, propId: int, value: float) -> bool:  # noqa: N803
        if not self._is_opened:
            raise RuntimeError("Camera is not opened")
        self._mock_dict[propId] = value
        return True

    def get(self, propId: int) -> float:  # noqa: N803
        if not self._is_opened:
            raise RuntimeError("Camera is not opened")
        value = self._mock_dict[propId]
        if value == 0:
            if propId == CAP_PROP_FRAME_HEIGHT:
                value = 480
            elif propId == CAP_PROP_FRAME_WIDTH:
                value = 640
        return value

    def read(self):
        if not self._is_opened:
            raise RuntimeError("Camera is not opened")
        h = self.get(CAP_PROP_FRAME_HEIGHT)
        w = self.get(CAP_PROP_FRAME_WIDTH)
        ret = True
        return ret, _generate_image(width=w, height=h)

    def release(self):
        self._is_opened = False

    def __del__(self):
        if self._is_opened:
            self.release()
lerobot/tests/mock_cv2.py/0
{ "file_path": "lerobot/tests/mock_cv2.py", "repo_id": "lerobot", "token_count": 1099 }
""" Tests for physical motors and their mocked versions. If the physical motors are not connected to the computer, or not working, the test will be skipped. Example of running a specific test: ```bash pytest -sx tests/test_motors.py::test_find_port pytest -sx tests/test_motors.py::test_motors_bus ``` Example of running test on real dynamixel motors connected to the computer: ```bash pytest -sx 'tests/test_motors.py::test_motors_bus[dynamixel-False]' ``` Example of running test on a mocked version of dynamixel motors: ```bash pytest -sx 'tests/test_motors.py::test_motors_bus[dynamixel-True]' ``` """ # TODO(rcadene): measure fps in nightly? # TODO(rcadene): test logs # TODO(rcadene): test calibration # TODO(rcadene): add compatibility with other motors bus import time import numpy as np import pytest from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError from lerobot.scripts.find_motors_bus_port import find_port from tests.utils import TEST_MOTOR_TYPES, make_motors_bus, require_motor @pytest.mark.parametrize("motor_type, mock", TEST_MOTOR_TYPES) @require_motor def test_find_port(request, motor_type, mock): if mock: request.getfixturevalue("patch_builtins_input") with pytest.raises(OSError): find_port() else: find_port() @pytest.mark.parametrize("motor_type, mock", TEST_MOTOR_TYPES) @require_motor def test_configure_motors_all_ids_1(request, motor_type, mock): if mock: request.getfixturevalue("patch_builtins_input") if motor_type == "dynamixel": # see X_SERIES_BAUDRATE_TABLE smaller_baudrate = 9_600 smaller_baudrate_value = 0 elif motor_type == "feetech": # see SCS_SERIES_BAUDRATE_TABLE smaller_baudrate = 19_200 smaller_baudrate_value = 7 else: raise ValueError(motor_type) input("Are you sure you want to re-configure the motors? Press enter to continue...") # This test expect the configuration was already correct. motors_bus = make_motors_bus(motor_type, mock=mock) motors_bus.connect() motors_bus.write("Baud_Rate", [smaller_baudrate_value] * len(motors_bus.motors)) motors_bus.set_bus_baudrate(smaller_baudrate) motors_bus.write("ID", [1] * len(motors_bus.motors)) del motors_bus # Test configure motors_bus = make_motors_bus(motor_type, mock=mock) motors_bus.connect() assert motors_bus.are_motors_configured() del motors_bus @pytest.mark.parametrize("motor_type, mock", TEST_MOTOR_TYPES) @require_motor def test_motors_bus(request, motor_type, mock): if mock: request.getfixturevalue("patch_builtins_input") motors_bus = make_motors_bus(motor_type, mock=mock) # Test reading and writting before connecting raises an error with pytest.raises(RobotDeviceNotConnectedError): motors_bus.read("Torque_Enable") with pytest.raises(RobotDeviceNotConnectedError): motors_bus.write("Torque_Enable", 1) with pytest.raises(RobotDeviceNotConnectedError): motors_bus.disconnect() # Test deleting the object without connecting first del motors_bus # Test connecting motors_bus = make_motors_bus(motor_type, mock=mock) motors_bus.connect() # Test connecting twice raises an error with pytest.raises(RobotDeviceAlreadyConnectedError): motors_bus.connect() # Test disabling torque and reading torque on all motors motors_bus.write("Torque_Enable", 0) values = motors_bus.read("Torque_Enable") assert isinstance(values, np.ndarray) assert len(values) == len(motors_bus.motors) assert (values == 0).all() # Test writing torque on a specific motor motors_bus.write("Torque_Enable", 1, "gripper") # Test reading torque from this specific motor. 
It is now 1 values = motors_bus.read("Torque_Enable", "gripper") assert len(values) == 1 assert values[0] == 1 # Test reading torque from all motors. It is 1 for the specific motor, # and 0 on the others. values = motors_bus.read("Torque_Enable") gripper_index = motors_bus.motor_names.index("gripper") assert values[gripper_index] == 1 assert values.sum() == 1 # gripper is the only motor to have torque 1 # Test writing torque on all motors and it is 1 for all. motors_bus.write("Torque_Enable", 1) values = motors_bus.read("Torque_Enable") assert (values == 1).all() # Test ordering the motors to move slightly (+1 value among 4096) and this move # can be executed and seen by the motor position sensor values = motors_bus.read("Present_Position") motors_bus.write("Goal_Position", values + 1) # Give time for the motors to move to the goal position time.sleep(1) new_values = motors_bus.read("Present_Position") assert (new_values == values).all()
lerobot/tests/test_motors.py/0
{ "file_path": "lerobot/tests/test_motors.py", "repo_id": "lerobot", "token_count": 1793 }
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Supervised fine-tuning script for decoder language models.

Usage:

# On 1 node of 8 x H100s
accelerate launch --config_file=recipes/accelerate_configs/zero3.yaml src/open_r1/sft.py \
    --model_name_or_path Qwen/Qwen2.5-1.5B-Instruct \
    --dataset_name HuggingFaceH4/Bespoke-Stratos-17k \
    --learning_rate 2.0e-5 \
    --num_train_epochs 1 \
    --packing \
    --max_seq_length 4096 \
    --per_device_train_batch_size 2 \
    --gradient_accumulation_steps 8 \
    --gradient_checkpointing \
    --bf16 \
    --logging_steps 5 \
    --eval_strategy steps \
    --eval_steps 100 \
    --output_dir data/Qwen2.5-1.5B-Open-R1-Distill
"""

import logging
import os
import sys

import datasets
import torch
import transformers
from datasets import load_dataset
from transformers import AutoTokenizer, set_seed
from transformers.trainer_utils import get_last_checkpoint

from open_r1.configs import SFTConfig
from open_r1.utils.callbacks import get_callbacks
from trl import (
    ModelConfig,
    ScriptArguments,
    SFTTrainer,
    TrlParser,
    get_kbit_device_map,
    get_peft_config,
    get_quantization_config,
)

logger = logging.getLogger(__name__)


def main(script_args, training_args, model_args):
    # Set seed for reproducibility
    set_seed(training_args.seed)

    ###############
    # Setup logging
    ###############
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process a small summary
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Model parameters {model_args}")
    logger.info(f"Script parameters {script_args}")
    logger.info(f"Data parameters {training_args}")

    # Check for last checkpoint
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
    if last_checkpoint is not None and training_args.resume_from_checkpoint is None:
        logger.info(f"Checkpoint detected, resuming training at {last_checkpoint=}.")

    ################
    # Load datasets
    ################
    dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config)

    ################
    # Load tokenizer
    ################
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code, use_fast=True
    )
    tokenizer.pad_token = tokenizer.eos_token

    ###################
    # Model init kwargs
    ###################
    logger.info("*** Initializing model kwargs ***")
    torch_dtype = (
        model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype)
    )
    quantization_config = get_quantization_config(model_args)
    model_kwargs = dict(
        revision=model_args.model_revision,
        trust_remote_code=model_args.trust_remote_code,
        attn_implementation=model_args.attn_implementation,
        torch_dtype=torch_dtype,
        use_cache=False if training_args.gradient_checkpointing else True,
        device_map=get_kbit_device_map() if quantization_config is not None else None,
        quantization_config=quantization_config,
    )
    training_args.model_init_kwargs = model_kwargs

    ############################
    # Initialize the SFT Trainer
    ############################
    trainer = SFTTrainer(
        model=model_args.model_name_or_path,
        args=training_args,
        train_dataset=dataset[script_args.dataset_train_split],
        eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None,
        processing_class=tokenizer,
        peft_config=get_peft_config(model_args),
        callbacks=get_callbacks(training_args, model_args),
    )

    ###############
    # Training loop
    ###############
    logger.info("*** Train ***")
    checkpoint = None
    if training_args.resume_from_checkpoint is not None:
        checkpoint = training_args.resume_from_checkpoint
    elif last_checkpoint is not None:
        checkpoint = last_checkpoint
    train_result = trainer.train(resume_from_checkpoint=checkpoint)
    metrics = train_result.metrics
    metrics["train_samples"] = len(dataset[script_args.dataset_train_split])
    trainer.log_metrics("train", metrics)
    trainer.save_metrics("train", metrics)
    trainer.save_state()

    ##################################
    # Save model and create model card
    ##################################
    logger.info("*** Save model ***")
    trainer.save_model(training_args.output_dir)
    logger.info(f"Model saved to {training_args.output_dir}")

    # Save everything else on main process
    kwargs = {
        "dataset_name": script_args.dataset_name,
        "tags": ["open-r1"],
    }
    if trainer.accelerator.is_main_process:
        trainer.create_model_card(**kwargs)
        # Restore k,v cache for fast inference
        trainer.model.config.use_cache = True
        trainer.model.config.save_pretrained(training_args.output_dir)

    ##########
    # Evaluate
    ##########
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        metrics["eval_samples"] = len(dataset[script_args.dataset_test_split])
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    #############
    # push to hub
    #############
    if training_args.push_to_hub:
        logger.info("Pushing to hub...")
        trainer.push_to_hub(**kwargs)


if __name__ == "__main__":
    parser = TrlParser((ScriptArguments, SFTConfig, ModelConfig))
    script_args, training_args, model_args = parser.parse_args_and_config()
    main(script_args, training_args, model_args)
open-r1/src/open_r1/sft.py/0
{ "file_path": "open-r1/src/open_r1/sft.py", "repo_id": "open-r1", "token_count": 2638 }
<!--- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Generating the documentation To generate the documentation, you first have to build it. Several packages are necessary to build the doc, you can install them with the following command, at the root of the code repository: ```bash pip install -e ".[docs]" ``` Then you need to install our special tool that builds the documentation: ```bash pip install git+https://github.com/huggingface/doc-builder ``` --- **NOTE** You only need to generate the documentation to inspect it locally (if you're planning changes and want to check how they look before committing for instance). You don't have to commit to the built documentation. --- ## Building the documentation Once you have setup the `doc-builder` and additional packages, you can generate the documentation by typing the following command: ```bash doc-builder build peft docs/source/ --build_dir ~/tmp/test-build ``` You can adapt the `--build_dir` to set any temporary folder you prefer. This command will create it and generate the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite Markdown editor. ## Previewing the documentation To preview the docs, first install the `watchdog` module with: ```bash pip install watchdog ``` Then run the following command: ```bash doc-builder preview {package_name} {path_to_docs} ``` For example: ```bash doc-builder preview peft docs/source ``` The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment to a link where the documentation with your changes lives. --- **NOTE** The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again). --- ## Adding a new element to the navigation bar Accepted files are Markdown (.md or .mdx). Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/peft/blob/main/docs/source/_toctree.yml) file. ## Renaming section headers and moving sections It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and Social media and it'd make for a much more superior user experience if users reading those months later could still easily navigate to the originally intended information. Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor. 
So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file: ``` Sections that were moved: [ <a href="#section-b">Section A</a><a id="section-a"></a> ] ``` and of course, if you moved it to another file, then: ``` Sections that were moved: [ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ] ``` Use the relative style to link to the new file so that the versioned docs continue to work. ## Writing Documentation - Specification The `huggingface/peft` documentation follows the [Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings, although we can write them directly in Markdown. ### Adding a new tutorial Adding a new tutorial or section is done in two steps: - Add a new file under `./source`. This file can either be ReStructuredText (.rst) or Markdown (.md). - Link that file in `./source/_toctree.yml` on the correct toc-tree. Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so depending on the intended targets (beginners, more advanced users, or researchers) it should go into sections two, three, or four. ### Writing source documentation Values that should be put in `code` should either be surrounded by backticks: \`like so\`. Note that argument names and objects like True, None, or any strings should usually be put in `code`. When mentioning a class, function, or method, it is recommended to use our syntax for internal links so that our tool adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or function to be in the main package. If you want to create a link to some internal class or function, you need to provide its path. For instance: \[\`utils.gather\`\]. This will be converted into a link with `utils.gather` in the description. To get rid of the path and only keep the name of the object you are linking to in the description, add a ~: \[\`~utils.gather\`\] will generate a link with `gather` in the description. The same works for methods so you can either use \[\`XXXClass.method\`\] or \[~\`XXXClass.method\`\]. #### Defining arguments in a method Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon, and its description: ``` Args: n_layers (`int`): The number of layers of the model. ``` If the description is too long to fit in one line (more than 119 characters in total), another indentation is necessary before writing the description after the argument. Finally, to maintain uniformity if any *one* description is too long to fit on one line, the rest of the parameters should follow suit and have an indention before their description. Here's an example showcasing everything so far: ``` Args: gradient_accumulation_steps (`int`, *optional*, default to 1): The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with `Accelerator.accumulate`. cpu (`bool`, *optional*): Whether or not to force the script to execute on CPU. Will ignore GPU available if set to `True` and force the execution on one process only. 
``` For optional arguments or arguments with defaults we follow the following syntax: imagine we have a function with the following signature: ``` def my_function(x: str = None, a: float = 1): ``` then its documentation should look like this: ``` Args: x (`str`, *optional*): This argument controls ... and has a description longer than 119 chars. a (`float`, *optional*, defaults to 1): This argument is used to ... and has a description longer than 119 chars. ``` Note that we always omit the "defaults to \`None\`" when None is the default for any argument. Also note that even if the first line describing your argument type and its default gets long, you can't break it into several lines. You can however write as many lines as you want in the indented description (see the example above with `input_ids`). #### Writing a multi-line code block Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown: ```` ```python # first line of code # second line # etc ``` ```` #### Writing a return block The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation. The first line should be the type of the return, followed by a line return. No need to indent further for the elements building the return. Here's an example of a single value return: ``` Returns: `List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token. ``` Here's an example of a tuple return, comprising several objects: ``` Returns: `tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs: - ** loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` -- Total loss is the sum of the masked language modeling loss and the next sequence prediction (classification) loss. - **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) -- Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). ``` ## Styling the docstring We have an automatic script running with the `make style` comment that will make sure that: - the docstrings fully take advantage of the line width - all code examples are formatted using black, like the code of the Transformers library This script may have some weird failures if you make a syntax mistake or if you uncover a bug. Therefore, it's recommended to commit your changes before running `make style`, so you can revert the changes done by that script easily. ## Writing documentation examples The syntax, for example, docstrings can look as follows: ``` Example: ```python >>> import time >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> if accelerator.is_main_process: ... time.sleep(2) >>> else: ... print("I'm waiting for the main process to finish its sleep...") >>> accelerator.wait_for_everyone() >>> # Should print on every process at the same time >>> print("Everyone is here") ``` ``` The docstring should give a minimal, clear example of how the respective function is to be used in inference and also include the expected (ideally sensible) output. Often, readers will try out the example before even going through the function or class definitions. Therefore, it is of utmost importance that the example works as expected.
peft/docs/README.md/0
{ "file_path": "peft/docs/README.md", "repo_id": "peft", "token_count": 2889 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Quantization Quantization represents data with fewer bits, making it a useful technique for reducing memory-usage and accelerating inference especially when it comes to large language models (LLMs). There are several ways to quantize a model including: * optimizing which model weights are quantized with the [AWQ](https://hf.co/papers/2306.00978) algorithm * independently quantizing each row of a weight matrix with the [GPTQ](https://hf.co/papers/2210.17323) algorithm * quantizing to 8-bit and 4-bit precision with the [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) library * quantizing to as low as 2-bit precision with the [AQLM](https://arxiv.org/abs/2401.06118) algorithm However, after a model is quantized it isn't typically further trained for downstream tasks because training can be unstable due to the lower precision of the weights and activations. But since PEFT methods only add *extra* trainable parameters, this allows you to train a quantized model with a PEFT adapter on top! Combining quantization with PEFT can be a good strategy for training even the largest models on a single GPU. For example, [QLoRA](https://hf.co/papers/2305.14314) is a method that quantizes a model to 4-bits and then trains it with LoRA. This method allows you to finetune a 65B parameter model on a single 48GB GPU! In this guide, you'll see how to quantize a model to 4-bits and train it with LoRA. ## Quantize a model [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) is a quantization library with a Transformers integration. With this integration, you can quantize a model to 8 or 4-bits and enable many other options by configuring the [`~transformers.BitsAndBytesConfig`] class. For example, you can: * set `load_in_4bit=True` to quantize the model to 4-bits when you load it * set `bnb_4bit_quant_type="nf4"` to use a special 4-bit data type for weights initialized from a normal distribution * set `bnb_4bit_use_double_quant=True` to use a nested quantization scheme to quantize the already quantized weights * set `bnb_4bit_compute_dtype=torch.bfloat16` to use bfloat16 for faster computation ```py import torch from transformers import BitsAndBytesConfig config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16, ) ``` Pass the `config` to the [`~transformers.AutoModelForCausalLM.from_pretrained`] method. ```py from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", quantization_config=config) ``` Next, you should call the [`~peft.utils.prepare_model_for_kbit_training`] function to preprocess the quantized model for training. 
```py from peft import prepare_model_for_kbit_training model = prepare_model_for_kbit_training(model) ``` Now that the quantized model is ready, let's set up a configuration. ## LoraConfig Create a [`LoraConfig`] with the following parameters (or choose your own): ```py from peft import LoraConfig config = LoraConfig( r=16, lora_alpha=8, target_modules=["q_proj", "k_proj", "v_proj", "o_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM" ) ``` Then use the [`get_peft_model`] function to create a [`PeftModel`] from the quantized model and configuration. ```py from peft import get_peft_model model = get_peft_model(model, config) ``` You're all set for training with whichever training method you prefer! ### LoftQ initialization [LoftQ](https://hf.co/papers/2310.08659) initializes LoRA weights such that the quantization error is minimized, and it can improve performance when training quantized models. To get started, follow [these instructions](https://github.com/huggingface/peft/tree/main/examples/loftq_finetuning). In general, for LoftQ to work best, it is recommended to target as many layers with LoRA as possible, since those not targeted cannot have LoftQ applied. This means that passing `LoraConfig(..., target_modules="all-linear")` will most likely give the best results. Also, you should use `nf4` as quant type in your quantization config when using 4bit quantization, i.e. `BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")`. ### QLoRA-style training QLoRA adds trainable weights to all the linear layers in the transformer architecture. Since the attribute names for these linear layers can vary across architectures, set `target_modules` to `"all-linear"` to add LoRA to all the linear layers: ```py config = LoraConfig(target_modules="all-linear", ...) ``` ## GPTQ quantization You can learn more about gptq based `[2, 3, 4, 8]` bits quantization at [GPTQModel](https://github.com/ModelCloud/GPTQModel) and the Transformers [GPTQ](https://huggingface.co/docs/transformers/quantization/gptq) doc. Post-quant training, PEFT can use both [GPTQModel](https://github.com/ModelCloud/GPTQModel) or [AutoGPTQ](https://github.com/autogptq/autogptq) libraries, but we recommend GPTQModel because AutoGPTQ will be deprecated in a future release. ```bash # gptqmodel install pip install gptqmodel --no-build-isolation ``` ```py from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig model_id = "facebook/opt-125m" tokenizer = AutoTokenizer.from_pretrained(model_id) gptq_config = GPTQConfig(bits=4, group_size=128, dataset="wikitext2", tokenizer=tokenizer) quantized_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", quantization_config=gptq_config) # save quantized model quantized_model.save_pretrained("./opt-125m-gptq") tokenizer.save_pretrained("./opt-125m-gptq") ``` Once quantized, you can post-train GPTQ models with PEFT APIs. ## AQLM quantization Additive Quantization of Language Models ([AQLM](https://arxiv.org/abs/2401.06118)) is a Large Language Models compression method. It quantizes multiple weights together and takes advantage of interdependencies between them. AQLM represents groups of 8-16 weights as a sum of multiple vector codes. This allows it to compress models down to as low as 2-bit with considerably low accuracy losses. Since the AQLM quantization process is computationally expensive, a use of prequantized models is recommended. 
A partial list of available models can be found in the official aqlm [repository](https://github.com/Vahe1994/AQLM). The models support LoRA adapter tuning. To tune the quantized model you'll need to install the `aqlm` inference library: `pip install aqlm>=1.0.2`. Finetuned LoRA adapters shall be saved separately, as merging them with AQLM quantized weights is not possible. ```py quantized_model = AutoModelForCausalLM.from_pretrained( "BlackSamorez/Mixtral-8x7b-AQLM-2Bit-1x16-hf-test-dispatch", torch_dtype="auto", device_map="auto", low_cpu_mem_usage=True, ) peft_config = LoraConfig(...) quantized_model = get_peft_model(quantized_model, peft_config) ``` You can refer to the [Google Colab](https://colab.research.google.com/drive/12GTp1FCj5_0SnnNQH18h_2XFh9vS_guX?usp=sharing) example for an overview of AQLM+LoRA finetuning. ## EETQ quantization You can also perform LoRA fine-tuning on EETQ quantized models. [EETQ](https://github.com/NetEase-FuXi/EETQ) package offers simple and efficient way to perform 8-bit quantization, which is claimed to be faster than the `LLM.int8()` algorithm. First, make sure that you have a transformers version that is compatible with EETQ (e.g. by installing it from latest pypi or from source). ```py import torch from transformers import EetqConfig config = EetqConfig("int8") ``` Pass the `config` to the [`~transformers.AutoModelForCausalLM.from_pretrained`] method. ```py from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", quantization_config=config) ``` and create a `LoraConfig` and pass it to `get_peft_model`: ```py from peft import LoraConfig, get_peft_model config = LoraConfig( r=16, lora_alpha=8, target_modules=["q_proj", "k_proj", "v_proj", "o_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM" ) model = get_peft_model(model, config) ``` ## HQQ quantization The models that is quantized using Half-Quadratic Quantization of Large Machine Learning Models ([HQQ](https://mobiusml.github.io/hqq_blog/)) support LoRA adapter tuning. To tune the quantized model, you'll need to install the `hqq` library with: `pip install hqq`. ```python from hqq.engine.hf import HQQModelForCausalLM quantized_model = HQQModelForCausalLM.from_quantized(save_dir_or_hfhub, device='cuda') peft_config = LoraConfig(...) quantized_model = get_peft_model(quantized_model, peft_config) ``` Or using transformers version that is compatible with HQQ (e.g. by installing it from latest pypi or from source). ```python from transformers import HqqConfig, AutoModelForCausalLM quant_config = HqqConfig(nbits=4, group_size=64) quantized_model = AutoModelForCausalLM.from_pretrained(save_dir_or_hfhub, device_map=device_map, quantization_config=quant_config) peft_config = LoraConfig(...) quantized_model = get_peft_model(quantized_model, peft_config) ``` ## torchao (PyTorch Architecture Optimization) PEFT supports models quantized with [torchao](https://github.com/pytorch/ao) ("ao") for int8 quantization. ```python from peft import LoraConfig, get_peft_model from transformers import AutoModelForCausalLM, TorchAoConfig model_id = ... quantization_config = TorchAoConfig(quant_type="int8_weight_only") base_model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config) peft_config = LoraConfig(...) model = get_peft_model(base_model, peft_config) ``` ### Caveats: - Use the most recent versions of torchao (>= v0.4.0) and transformers (> 4.42). - Only linear layers are currently supported. 
- `quant_type = "int4_weight_only"` is currently not supported. - `NF4` is not implemented in transformers as of yet and is thus also not supported. - DoRA only works with `quant_type = "int8_weight_only"` at the moment. - There is explicit support for torchao when used with LoRA. However, when torchao quantizes a layer, its class does not change, only the type of the underlying tensor. For this reason, PEFT methods other than LoRA will generally also work with torchao, even if not explicitly supported. Be aware, however, that **merging only works correctly with LoRA and with `quant_type = "int8_weight_only"`**. If you use a different PEFT method or dtype, merging will likely result in an error, and even it doesn't, the results will still be incorrect. ## Other Supported PEFT Methods Besides LoRA, the following PEFT methods also support quantization: - **VeRA** (supports bitsandbytes quantization) - **AdaLoRA** (supports both bitsandbytes and GPTQ quantization) - **(IA)³** (supports bitsandbytes quantization) ## Next steps If you're interested in learning more about quantization, the following may be helpful: * Learn more details about QLoRA and check out some benchmarks on its impact in the [Making LLMs even more accessible with bitsandbytes, 4-bit quantization and QLoRA](https://huggingface.co/blog/4bit-transformers-bitsandbytes) blog post. * Read more about different quantization schemes in the Transformers [Quantization](https://hf.co/docs/transformers/main/quantization) guide.
peft/docs/source/developer_guides/quantization.md/0
{ "file_path": "peft/docs/source/developer_guides/quantization.md", "repo_id": "peft", "token_count": 3748 }
#!/usr/bin/env python # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # The implementation is based on "Parameter-Efficient Orthogonal Finetuning # via Butterfly Factorization" (https://arxiv.org/abs/2311.06243) in ICLR 2024. import itertools import logging import math import os from pathlib import Path import datasets import diffusers import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDIMScheduler, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from packaging import version from tqdm.auto import tqdm from transformers import AutoTokenizer from utils.args_loader import ( import_model_class_from_model_name_or_path, parse_args, ) from utils.dataset import collate_fn, log_validation, make_dataset from utils.light_controlnet import ControlNetModel from utils.tracemalloc import TorchTracemalloc, b2mb from utils.unet_2d_condition import UNet2DConditionNewModel from peft import BOFTConfig, get_peft_model from peft.peft_model import PeftModel # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.16.0.dev0") logger = get_logger(__name__) UNET_TARGET_MODULES = ["to_q", "to_v", "to_k", "query", "value", "key"] TEXT_ENCODER_TARGET_MODULES = ["q_proj", "v_proj"] @torch.no_grad() def save_adaptor(accelerator, output_dir, nets_dict): for net_key in nets_dict.keys(): net_model = nets_dict[net_key] unwarpped_net = accelerator.unwrap_model(net_model) if isinstance(unwarpped_net, PeftModel): unwarpped_net.save_pretrained( os.path.join(output_dir, net_key), state_dict=accelerator.get_state_dict(net_model), safe_serialization=True, ) else: accelerator.save_model( unwarpped_net, os.path.join(output_dir, net_key), safe_serialization=True, ) def main(args): logging_dir = Path(args.output_dir, args.logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_dir=logging_dir, ) if args.report_to == "wandb": wandb_init = { "wandb": { "name": args.wandb_run_name, "mode": "online", } } # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) # Load the tokenizer if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False) elif args.pretrained_model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) # import correct text encoder class text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision) # Load scheduler and models noise_scheduler = DDIMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = text_encoder_cls.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) unet = UNet2DConditionNewModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, ) controlnet = ControlNetModel() if args.controlnet_model_name_or_path != "": logger.info(f"Loading existing controlnet weights from {args.controlnet_model_name_or_path}") controlnet.load_state_dict(torch.load(args.controlnet_model_name_or_path)) if args.use_boft: config = BOFTConfig( boft_block_size=args.boft_block_size, boft_block_num=args.boft_block_num, boft_n_butterfly_factor=args.boft_n_butterfly_factor, target_modules=UNET_TARGET_MODULES, boft_dropout=args.boft_dropout, bias=args.boft_bias, ) unet = get_peft_model(unet, config) unet.print_trainable_parameters() vae.requires_grad_(False) controlnet.requires_grad_(True) if not args.train_text_encoder: text_encoder.requires_grad_(False) unet.train() controlnet.train() if args.train_text_encoder and args.use_boft: config = BOFTConfig( boft_block_size=args.boft_block_size, boft_block_num=args.boft_block_num, boft_n_butterfly_factor=args.boft_n_butterfly_factor, target_modules=TEXT_ENCODER_TARGET_MODULES, boft_dropout=args.boft_dropout, bias=args.boft_bias, ) text_encoder = get_peft_model(text_encoder, config, adapter_name=args.wandb_run_name) text_encoder.print_trainable_parameters() if args.train_text_encoder: text_encoder.train() # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move unet, vae and text_encoder to device and cast to weight_dtype unet.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) controlnet.to(accelerator.device, dtype=weight_dtype) if not args.train_text_encoder: text_encoder.to(accelerator.device, dtype=weight_dtype) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." 
) unet.enable_xformers_memory_efficient_attention() controlnet.enable_xformers_memory_efficient_attention() if args.train_text_encoder and not (args.use_lora or args.use_boft or args.use_oft): text_encoder.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") if args.gradient_checkpointing: controlnet.enable_gradient_checkpointing() unet.enable_gradient_checkpointing() if args.train_text_encoder and not (args.use_lora or args.use_boft or args.use_oft): text_encoder.gradient_checkpointing_enable() # Check that all trainable models are in full precision low_precision_error_string = ( " Please make sure to always have all model weights in full float32 precision when starting training - even if" " doing mixed precision training, copy of the weights should still be float32." ) if accelerator.unwrap_model(controlnet).dtype != torch.float32: raise ValueError( f"Controlnet loaded as datatype {accelerator.unwrap_model(controlnet).dtype}. {low_precision_error_string}" ) if accelerator.unwrap_model(unet).dtype != torch.float32: raise ValueError( f"UNet loaded as datatype {accelerator.unwrap_model(unet).dtype}. {low_precision_error_string}" ) # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW params_to_optimize = [param for param in controlnet.parameters() if param.requires_grad] params_to_optimize += [param for param in unet.parameters() if param.requires_grad] if args.train_text_encoder: params_to_optimize += [param for param in text_encoder.parameters() if param.requires_grad] # Optimizer creation optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Load the dataset train_dataset = make_dataset(args, tokenizer, accelerator, "train") val_dataset = make_dataset(args, tokenizer, accelerator, "test") train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. 
controlnet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( controlnet, optimizer, train_dataloader, lr_scheduler ) if args.train_text_encoder: text_encoder = accelerator.prepare(text_encoder) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers(args.wandb_project_name, config=vars(args), init_kwargs=wandb_init) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) if "checkpoint-current" in dirs: path = "checkpoint-current" dirs = [d for d in dirs if d.startswith("checkpoint") and d.endswith("0")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) else: dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." 
) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) if path.split("-")[1] == "current": global_step = int(dirs[-1].split("-")[1]) else: global_step = int(path.split("-")[1]) initial_global_step = global_step resume_global_step = global_step * args.gradient_accumulation_steps first_epoch = global_step // num_update_steps_per_epoch resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps) else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", disable=not accelerator.is_local_main_process, ) progress_bar.set_description("Steps") for epoch in range(first_epoch, args.num_train_epochs): with TorchTracemalloc() as tracemalloc: for step, batch in enumerate(train_dataloader): # Skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) if args.report_to == "wandb": accelerator.print(progress_bar) continue with accelerator.accumulate(controlnet), accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device ) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] controlnet_image = batch["conditioning_pixel_values"].to(dtype=weight_dtype) # Get the guided hint for the UNet (320 dim) guided_hint = controlnet( controlnet_cond=controlnet_image, ) # Predict the noise residual model_pred = unet( noisy_latents, timesteps, guided_hint=guided_hint, encoder_hidden_states=encoder_hidden_states, ).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = ( itertools.chain(controlnet.parameters(), text_encoder.parameters()) if args.train_text_encoder else itertools.chain( controlnet.parameters(), ) ) accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=args.set_grads_to_none) # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) if args.report_to == "wandb": accelerator.print(progress_bar) global_step += 1 step_save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") if accelerator.is_main_process: if global_step % args.validation_steps == 0 or global_step == 1: logger.info(f"Running validation... 
\n Generating {args.num_validation_images} images.") logger.info("Running validation... ") with torch.no_grad(): log_validation(val_dataset, text_encoder, unet, controlnet, args, accelerator) if global_step % args.checkpointing_steps == 0: save_adaptor(accelerator, step_save_path, {"controlnet": controlnet, "unet": unet}) # save text_encoder if any if args.train_text_encoder: save_adaptor(accelerator, step_save_path, {"text_encoder": text_encoder}) accelerator.save_state(step_save_path) logger.info(f"Saved {global_step} state to {step_save_path}") logger.info(f"Saved current state to {step_save_path}") logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print(f"GPU Memory before entering the train : {b2mb(tracemalloc.begin)}") accelerator.print(f"GPU Memory consumed at the end of the train (end-begin): {tracemalloc.used}") accelerator.print(f"GPU Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}") accelerator.print( f"GPU Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}" ) accelerator.print(f"CPU Memory before entering the train : {b2mb(tracemalloc.cpu_begin)}") accelerator.print(f"CPU Memory consumed at the end of the train (end-begin): {tracemalloc.cpu_used}") accelerator.print(f"CPU Peak Memory consumed during the train (max-begin): {tracemalloc.cpu_peaked}") accelerator.print( f"CPU Total Peak Memory consumed during the train (max): {tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin)}" ) # Create the pipeline using using the trained modules and save it. accelerator.wait_for_everyone() accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
peft/examples/boft_controlnet/train_controlnet.py/0
{ "file_path": "peft/examples/boft_controlnet/train_controlnet.py", "repo_id": "peft", "token_count": 10032 }
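The training script above wraps the UNet with `get_peft_model` and a `BOFTConfig` built from command-line arguments. As a quick orientation, here is a minimal, self-contained sketch of that wrapping step on a toy module; the toy module and the hyperparameter values are illustrative assumptions, not the script's argparse defaults.

```python
# Minimal sketch: apply BOFT to a toy attention-like module, mirroring how the training
# script wraps the UNet. The module definition and hyperparameters below are assumptions.
import torch.nn as nn

from peft import BOFTConfig, get_peft_model


class ToyAttention(nn.Module):
    def __init__(self, dim: int = 64):
        super().__init__()
        self.to_q = nn.Linear(dim, dim)
        self.to_k = nn.Linear(dim, dim)
        self.to_v = nn.Linear(dim, dim)

    def forward(self, x):
        return self.to_q(x) + self.to_k(x) + self.to_v(x)


config = BOFTConfig(
    boft_block_size=4,          # size of each orthogonal block; must divide the layer width (64 here)
    boft_n_butterfly_factor=1,  # number of butterfly factorization levels (kept minimal here)
    target_modules=["to_q", "to_k", "to_v"],
    boft_dropout=0.0,
    bias="none",
)
model = get_peft_model(ToyAttention(), config)
model.print_trainable_parameters()
```

Because `target_modules` matches layers by name, the same kind of config transfers to the real UNet, whose attention projections appear in `UNET_TARGET_MODULES` above.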
<jupyter_start><jupyter_text>PEFT with DNA Language Models This notebook demonstrates how to utilize parameter-efficient fine-tuning techniques (PEFT) from the PEFT library to fine-tune a DNA Language Model (DNA-LM). The fine-tuned DNA-LM will be applied to solve a task from the nucleotide benchmark dataset. Parameter-efficient fine-tuning (PEFT) techniques are crucial for adapting large pre-trained models to specific tasks with limited computational resources. 1. Import relevant libraries We'll start by importing the required libraries, including the PEFT library and other dependencies.<jupyter_code>import torch import transformers import peft import tqdm import numpy as np<jupyter_output>/opt/homebrew/anaconda3/envs/peft/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html from .autonotebook import tqdm as notebook_tqdm<jupyter_text>2. Load models We'll load a pre-trained DNA Language Model, "SpeciesLM", that serves as the base for fine-tuning. This is done using the transformers library from HuggingFace.The tokenizer and the model comes from the paper, "Species-aware DNA language models capture regulatory elements and their evolution". [Paper Link](https://www.biorxiv.org/content/10.1101/2023.01.26.525670v2), [Code Link](https://github.com/gagneurlab/SpeciesLM). They introduce a species-aware DNA language model, which is trained on more than 800 species spanning over 500 million years of evolution.<jupyter_code>from transformers import AutoTokenizer, AutoModelForMaskedLM tokenizer = AutoTokenizer.from_pretrained("gagneurlab/SpeciesLM", revision = "downstream_species_lm") lm = AutoModelForMaskedLM.from_pretrained("gagneurlab/SpeciesLM", revision = "downstream_species_lm") lm.eval() lm.to("cuda");<jupyter_output><empty_output><jupyter_text>2. Prepare datasets We'll load the `nucleotide_transformer_downstream_tasks` dataset, which contains 18 downstream tasks from the Nucleotide Transformer paper. This dataset provides a consistent genomics benchmark with binary classification tasks.<jupyter_code>from datasets import load_dataset raw_data = load_dataset("InstaDeepAI/nucleotide_transformer_downstream_tasks", "H3")<jupyter_output><empty_output><jupyter_text>We'll use the "H3" subset of this dataset, which contains a total of 13,468 rows in the training data, and 1497 rows in the test data.<jupyter_code>raw_data<jupyter_output><empty_output><jupyter_text>The dataset consists of three columns, ```sequence```, ```name``` and ```label```. An row in this dataset looks like:<jupyter_code>raw_data['train'][0]<jupyter_output><empty_output><jupyter_text>We split out dataset into training, test, and validation sets.<jupyter_code>from datasets import Dataset, DatasetDict train_valid_split = raw_data['train'].train_test_split(test_size=0.15, seed=42) train_valid_split = DatasetDict({ 'train': train_valid_split['train'], 'validation': train_valid_split['test'] }) ds = DatasetDict({ 'train': train_valid_split['train'], 'validation': train_valid_split['validation'], 'test': raw_data['test'] })<jupyter_output><empty_output><jupyter_text>Then, we use the tokenizer and a utility function we created, ```get_kmers``` to generate the final data and labels. The ```get_kmers``` function is essential for generating overlapping 6-mers needed by the language model (LM). 
By using k=6 and stride=1, we ensure that the model receives continuous and overlapping subsequences, capturing the local context within the biological sequence for more effective analysis and prediction.<jupyter_code>def get_kmers(seq, k=6, stride=1): return [seq[i:i + k] for i in range(0, len(seq), stride) if i + k <= len(seq)] test_sequences = [] train_sequences = [] val_sequences = [] dataset_limit = 200 # NOTE: This dataset limit is set to 200, so that the training runs faster. It can be set to None to use the # entire dataset for i in range(0, len(ds['train'])): if dataset_limit and i == dataset_limit: break sequence = ds['train'][i]['sequence'] sequence = "candida_glabrata " + " ".join(get_kmers(sequence)) sequence = tokenizer(sequence)["input_ids"] train_sequences.append(sequence) for i in range(0, len(ds['validation'])): if dataset_limit and i == dataset_limit: break sequence = ds['validation'][i]['sequence'] sequence = "candida_glabrata " + " ".join(get_kmers(sequence)) sequence = tokenizer(sequence)["input_ids"] val_sequences.append(sequence) for i in range(0, len(ds['test'])): if dataset_limit and i == dataset_limit: break sequence = ds['test'][i]['sequence'] sequence = "candida_glabrata " + " ".join(get_kmers(sequence)) sequence = tokenizer(sequence)["input_ids"] test_sequences.append(sequence) train_labels = ds['train']['label'] test_labels = ds['test']['label'] val_labels = ds['validation']['label'] if dataset_limit: train_labels = train_labels[0:dataset_limit] test_labels = test_labels[0:dataset_limit] val_labels = val_labels[0:dataset_limit]<jupyter_output><empty_output><jupyter_text>Finally, we create a Dataset object for each our sets.<jupyter_code>from datasets import Dataset train_dataset = Dataset.from_dict({"input_ids": train_sequences, "labels": train_labels}) val_dataset = Dataset.from_dict({"input_ids": val_sequences, "labels": val_labels}) test_dataset = Dataset.from_dict({"input_ids": test_sequences, "labels": test_labels})<jupyter_output><empty_output><jupyter_text>4. Train model Now, we'll train our DNA Language Model with the training dataset. 
We'll add a linear layer in the final layer of our language model, and then, train all the parameteres of our model with the training dataset.<jupyter_code>from transformers import DataCollatorWithPadding data_collator = DataCollatorWithPadding(tokenizer=tokenizer) import torch from torch import nn class DNA_LM(nn.Module): def __init__(self, model, num_labels): super(DNA_LM, self).__init__() self.model = model.bert self.in_features = model.config.hidden_size self.out_features = num_labels self.classifier = nn.Linear(self.in_features, self.out_features) def forward(self, input_ids, attention_mask=None, labels=None): outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True) sequence_output = outputs.hidden_states[-1] # Use the [CLS] token for classification cls_output = sequence_output[:, 0, :] logits = self.classifier(cls_output) loss = None if labels is not None: loss_fct = nn.CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.out_features), labels.view(-1)) return (loss, logits) if loss is not None else logits # Number of classes for your classification task num_labels = 2 classification_model = DNA_LM(lm, num_labels) classification_model.to('cuda'); from transformers import DataCollatorWithPadding data_collator = DataCollatorWithPadding(tokenizer=tokenizer) from transformers import Trainer, TrainingArguments # Define training arguments training_args = TrainingArguments( output_dir='./results', eval_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=5, weight_decay=0.01, eval_steps=1, logging_steps=1, ) # Initialize Trainer trainer = Trainer( model=classification_model, args=training_args, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, data_collator=data_collator, ) # Train the model trainer.train()<jupyter_output><empty_output><jupyter_text>5. Evaluation<jupyter_code># Generate predictions predictions = trainer.predict(test_dataset) logits = predictions.predictions predicted_labels = logits.argmax(axis=-1) print(predicted_labels)<jupyter_output><empty_output><jupyter_text>Then, we create a function to calculate the accuracy from the test and predicted labels.<jupyter_code>def calculate_accuracy(true_labels, predicted_labels): assert len(true_labels) == len(predicted_labels), "Arrays must have the same length" correct_predictions = np.sum(true_labels == predicted_labels) accuracy = correct_predictions / len(true_labels) return accuracy accuracy = calculate_accuracy(test_labels, predicted_labels) print(f"Accuracy: {accuracy:.2f}")<jupyter_output>Accuracy: 0.53<jupyter_text>The results aren't that good, which we can attribute to the small dataset size. 7. Parameter Efficient Fine-Tuning Techniques In this section, we demonstrate how to employ parameter-efficient fine-tuning (PEFT) techniques to adapt a pre-trained model for specific genomics tasks using the PEFT library. 
The LoraConfig object is instantiated to configure the PEFT parameters:- task_type: Specifies the type of task, in this case, sequence classification (SEQ_CLS).- r: The rank of the LoRA matrices.- lora_alpha: Scaling factor for adaptive re-parameterization.- target_modules: Modules within the model to apply PEFT re-parameterization (query, key, value in this example).- lora_dropout: Dropout rate used during PEFT fine-tuning.<jupyter_code># Number of classes for your classification task num_labels = 2 classification_model = DNA_LM(lm, num_labels) classification_model.to('cuda'); from peft import LoraConfig, TaskType peft_config = LoraConfig( r=8, lora_alpha=32, target_modules=["query", "key", "value"], lora_dropout=0.01, ) from peft import get_peft_model peft_model = get_peft_model(classification_model, peft_config) peft_model.print_trainable_parameters() peft_model # Define training arguments training_args = TrainingArguments( output_dir='./results', eval_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=5, weight_decay=0.01, eval_steps=1, logging_steps=1, ) # Initialize Trainer trainer = Trainer( model=peft_model.model, args=training_args, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, data_collator=data_collator, ) # Train the model trainer.train()<jupyter_output><empty_output><jupyter_text>8. Evaluate PEFT Model<jupyter_code># Generate predictions predictions = trainer.predict(test_dataset) logits = predictions.predictions predicted_labels = logits.argmax(axis=-1) print(predicted_labels) def calculate_accuracy(true_labels, predicted_labels): assert len(true_labels) == len(predicted_labels), "Arrays must have the same length" correct_predictions = np.sum(true_labels == predicted_labels) accuracy = correct_predictions / len(true_labels) return accuracy accuracy = calculate_accuracy(test_labels, predicted_labels) print(f"Accuracy: {accuracy:.2f}")<jupyter_output>Accuracy: 0.52
peft/examples/dna_language_models/dna_lm.ipynb/0
{ "file_path": "peft/examples/dna_language_models/dna_lm.ipynb", "repo_id": "peft", "token_count": 3782 }
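The notebook's preprocessing hinges on `get_kmers`, which slices a sequence into overlapping 6-mers before the species token is prepended. A small sanity check (the DNA string below is made up) shows what the tokenizer actually receives:

```python
# Quick check of the k-mer preprocessing: `get_kmers` (copied from the notebook) turns a
# DNA string into overlapping 6-mers with stride 1, which are then joined behind the
# species token before tokenization.
def get_kmers(seq, k=6, stride=1):
    return [seq[i:i + k] for i in range(0, len(seq), stride) if i + k <= len(seq)]


seq = "ACGTACGTAC"
kmers = get_kmers(seq)
print(kmers)  # ['ACGTAC', 'CGTACG', 'GTACGT', 'TACGTA', 'ACGTAC']
print("candida_glabrata " + " ".join(kmers))  # string handed to the tokenizer
```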
accelerate launch --config_file config.yaml peft_adalora_whisper_large_training.py \ --model_name_or_path "openai/whisper-large-v2" \ --language "Marathi" \ --language_abbr "mr" \ --task "transcribe" \ --dataset_name "mozilla-foundation/common_voice_11_0" \ --push_to_hub \ --preprocessing_num_workers 2 \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --dataloader_pin_memory \ --dataloader_num_workers 2 \ --learning_rate 1e-3 \ --weight_decay 1e-4 \ --num_train_epochs 3 \ --gradient_accumulation_steps 1 \ --lr_scheduler_type "linear" \ --num_warmup_steps 50 \ --output_dir "adalora_whisper_large_marathi_multi_adapter" \ --seed 42 \ --load_best_model \ --with_tracking \ --report_to "wandb" \ --hub_token $HUB_TOKEN \ --checkpointing_steps 2000 \ --evaluation_steps 2000 \ --logging_steps 25 \ --use_peft \ --use_adalora \ --init_r 12 \ --target_r 8 \ --tinit 100 \ --tfinal 800 \ --delta_t 10 \ --lora_alpha 32 \ --lora_dropout 0.1 \ --orth_reg_weight 0.5
peft/examples/int8_training/run_adalora_whisper_int8.sh/0
{ "file_path": "peft/examples/int8_training/run_adalora_whisper_int8.sh", "repo_id": "peft", "token_count": 509 }
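For readers who want to see what the AdaLoRA flags in the command above correspond to on the Python side, here is a hedged sketch of an equivalent `AdaLoraConfig`. The flag-to-field mapping, the `total_step` value, and the Whisper target module names are assumptions for illustration; the referenced `peft_adalora_whisper_large_training.py` script does its own argparse wiring and module selection.

```python
# Hedged sketch of the AdaLoRA settings the command-line flags above roughly map to.
from peft import AdaLoraConfig

adalora_config = AdaLoraConfig(
    init_r=12,                 # --init_r
    target_r=8,                # --target_r
    tinit=100,                 # --tinit
    tfinal=800,                # --tfinal
    deltaT=10,                 # --delta_t
    lora_alpha=32,             # --lora_alpha
    lora_dropout=0.1,          # --lora_dropout
    orth_reg_weight=0.5,       # --orth_reg_weight
    total_step=3000,           # assumed total optimizer steps; AdaLoRA's rank schedule needs this
    target_modules=["q_proj", "v_proj"],  # assumed attention projections, not the script's exact list
)
```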
<jupyter_start><jupyter_text>Dreambooth with OFT. This Notebook assumes that you already ran the train_dreambooth.py script to create your own adapter.<jupyter_code>from diffusers import DiffusionPipeline
from diffusers.utils import check_min_version, get_logger

from peft import PeftModel

# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.10.0.dev0")

logger = get_logger(__name__)

BASE_MODEL_NAME = "stabilityai/stable-diffusion-2-1-base"
ADAPTER_MODEL_PATH = "INSERT MODEL PATH HERE"

pipe = DiffusionPipeline.from_pretrained(
    BASE_MODEL_NAME,
)
pipe.to("cuda")
pipe.unet = PeftModel.from_pretrained(pipe.unet, ADAPTER_MODEL_PATH + "/unet", adapter_name="default")
pipe.text_encoder = PeftModel.from_pretrained(
    pipe.text_encoder, ADAPTER_MODEL_PATH + "/text_encoder", adapter_name="default"
)

prompt = "A photo of a sks dog"
image = pipe(
    prompt,
    num_inference_steps=50,
    height=512,
    width=512,
).images[0]
image<jupyter_output><empty_output>
peft/examples/oft_dreambooth/oft_dreambooth_inference.ipynb/0
{ "file_path": "peft/examples/oft_dreambooth/oft_dreambooth_inference.ipynb", "repo_id": "peft", "token_count": 376 }
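As an optional follow-up to the notebook above (not part of it): once the OFT adapters are loaded onto `pipe.unet` and `pipe.text_encoder`, they can typically be folded back into the base weights so inference runs without the PEFT wrappers. The snippet below continues from the `pipe` object built in the notebook and assumes the loaded adapters support merging in your PEFT version.

```python
# Sketch: merge the loaded OFT adapters into the base weights (assumes `pipe` from the
# notebook above; merging support depends on adapter type and PEFT version).
pipe.unet = pipe.unet.merge_and_unload()
pipe.text_encoder = pipe.text_encoder.merge_and_unload()

image = pipe("A photo of a sks dog", num_inference_steps=50, height=512, width=512).images[0]
```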
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This is a minimal example of launching PEFT with Accelerate. This used to cause issues because PEFT would eagerly # import bitsandbytes, which initializes CUDA, resulting in: # > RuntimeError: Cannot re-initialize CUDA in forked subprocess. To use CUDA with multiprocessing, you must use the # > 'spawn' start method # This script exists to ensure that this issue does not reoccur. import torch from accelerate import notebook_launcher import peft def init(): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(1, 2) def forward(self, x): return self.linear(x) model = MyModule().to("cuda") peft.get_peft_model(model, peft.LoraConfig(target_modules=["linear"])) def main(): notebook_launcher(init, (), num_processes=2) if __name__ == "__main__": main()
peft/scripts/launch_notebook_mp.py/0
{ "file_path": "peft/scripts/launch_notebook_mp.py", "repo_id": "peft", "token_count": 474 }
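The script above exists because an eager `import bitsandbytes` initializes CUDA before `notebook_launcher` forks its worker processes. The fix on the PEFT side is to defer such imports into the code path that actually needs them; the function below is an illustrative sketch of that pattern, not PEFT's actual dispatch code.

```python
# Illustrative lazy-import pattern: CUDA-touching libraries are imported only inside the
# function that needs them, so importing the package itself never initializes CUDA.
def dispatch_bnb_8bit_sketch(target, adapter_name, **kwargs):
    import bitsandbytes as bnb  # deferred: only runs when an 8-bit layer is actually dispatched

    if isinstance(target, bnb.nn.Linear8bitLt):
        ...  # build and return the adapted layer here
    return None
```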
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .adalora import AdaLoraConfig, AdaLoraModel from .adaption_prompt import AdaptionPromptConfig, AdaptionPromptModel from .boft import BOFTConfig, BOFTModel from .bone import BoneConfig, BoneModel from .cpt import CPTConfig, CPTEmbedding from .fourierft import FourierFTConfig, FourierFTModel from .hra import HRAConfig, HRAModel from .ia3 import IA3Config, IA3Model from .ln_tuning import LNTuningConfig, LNTuningModel from .loha import LoHaConfig, LoHaModel from .lokr import LoKrConfig, LoKrModel from .lora import ( EvaConfig, LoftQConfig, LoraConfig, LoraModel, LoraRuntimeConfig, get_eva_state_dict, initialize_lora_eva_weights, ) from .mixed import MixedModel from .multitask_prompt_tuning import MultitaskPromptEmbedding, MultitaskPromptTuningConfig, MultitaskPromptTuningInit from .oft import OFTConfig, OFTModel from .p_tuning import PromptEncoder, PromptEncoderConfig, PromptEncoderReparameterizationType from .poly import PolyConfig, PolyModel from .prefix_tuning import PrefixEncoder, PrefixTuningConfig from .prompt_tuning import PromptEmbedding, PromptTuningConfig, PromptTuningInit from .vblora import VBLoRAConfig, VBLoRAModel from .vera import VeraConfig, VeraModel from .xlora import XLoraConfig, XLoraModel __all__ = [ "AdaLoraConfig", "AdaLoraModel", "AdaptionPromptConfig", "AdaptionPromptModel", "BOFTConfig", "BOFTModel", "BoneConfig", "BoneModel", "CPTConfig", "CPTEmbedding", "EvaConfig", "FourierFTConfig", "FourierFTModel", "HRAConfig", "HRAModel", "IA3Config", "IA3Model", "LNTuningConfig", "LNTuningModel", "LoHaConfig", "LoHaModel", "LoKrConfig", "LoKrModel", "LoftQConfig", "LoraConfig", "LoraModel", "LoraRuntimeConfig", "MixedModel", "MultitaskPromptEmbedding", "MultitaskPromptTuningConfig", "MultitaskPromptTuningInit", "OFTConfig", "OFTModel", "PolyConfig", "PolyModel", "PrefixEncoder", "PrefixTuningConfig", "PromptEmbedding", "PromptEncoder", "PromptEncoderConfig", "PromptEncoderReparameterizationType", "PromptTuningConfig", "PromptTuningInit", "VBLoRAConfig", "VBLoRAModel", "VeraConfig", "VeraModel", "XLoraConfig", "XLoraModel", "get_eva_state_dict", "initialize_lora_eva_weights", ]
peft/src/peft/tuners/__init__.py/0
{ "file_path": "peft/src/peft/tuners/__init__.py", "repo_id": "peft", "token_count": 1113 }
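Because of the re-exports above, the tuner configs and models can be imported directly from `peft.tuners` (most are also re-exported at the top-level `peft` package). A trivial usage sketch, with illustrative argument values:

```python
from peft.tuners import BOFTConfig, LoraConfig

# Both configs are plain dataclasses; instantiating them does not touch any model yet.
lora_cfg = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])
boft_cfg = BOFTConfig(boft_block_size=4, target_modules=["q_proj", "v_proj"])
print(type(lora_cfg).__name__, type(boft_cfg).__name__)
```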
#include <torch/torch.h> #include <vector> #include <iostream> #include <torch/extension.h> std::vector<at::Tensor> forward_fast_block_diag_cuda( at::Tensor input); std::vector<at::Tensor> forward_fast_block_diag( at::Tensor input ) { return forward_fast_block_diag_cuda(input); } std::vector<at::Tensor> backward_fast_block_diag_cuda( at::Tensor grad_output, at::Tensor input); std::vector<at::Tensor> backward_fast_block_diag( at::Tensor grad_output, at::Tensor input ) { return backward_fast_block_diag_cuda(grad_output, input); } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("forward", &forward_fast_block_diag, "FAST BLOCK DIAG (CUDA)"); m.def("backward", &backward_fast_block_diag, "FAST BLOCK DIAG backward (CUDA)"); }
peft/src/peft/tuners/boft/fbd/fbd_cuda.cpp/0
{ "file_path": "peft/src/peft/tuners/boft/fbd/fbd_cuda.cpp", "repo_id": "peft", "token_count": 370 }
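The C++ file above only declares the pybind11 bindings; the CUDA kernels it forwards to live in a companion `.cu` file (not shown here). Below is a hedged sketch of how such an extension can be JIT-compiled and exposed to Python; the source file names and the fallback behaviour are assumptions, and PEFT's BOFT code performs this loading (with a pure-PyTorch fallback) internally.

```python
# Sketch: JIT-compile the pybind11/CUDA extension declared above and expose its bindings.
# File names are assumed; PEFT ships and loads these sources itself.
from torch.utils.cpp_extension import load

try:
    fbd_cuda = load(
        name="fbd_cuda",
        sources=["fbd_cuda.cpp", "fbd_cuda_kernel.cu"],
        verbose=True,
    )  # exposes fbd_cuda.forward(input) and fbd_cuda.backward(grad_output, input)
except Exception:
    fbd_cuda = None  # e.g. no CUDA toolchain available; fall back to a plain PyTorch implementation
```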
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import math import operator import warnings from contextlib import contextmanager from dataclasses import asdict, replace from enum import Enum from functools import partial, reduce from typing import Literal, Optional import torch from torch import nn from tqdm import tqdm from peft.import_utils import is_bnb_4bit_available, is_bnb_available from peft.tuners.tuners_utils import ( BaseTuner, BaseTunerLayer, check_target_module_exists, onload_layer, replicate_layers, ) from peft.utils import ( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, _freeze_adapter, _get_submodules, get_peft_model_state_dict, get_quantization_config, ) from peft.utils.merge_utils import dare_linear, dare_ties, magnitude_prune, task_arithmetic, ties from peft.utils.other import get_pattern_key from .aqlm import dispatch_aqlm from .awq import dispatch_awq from .config import LoraConfig from .eetq import dispatch_eetq from .gptq import dispatch_gptq from .hqq import dispatch_hqq from .layer import Conv2d, LoraLayer, dispatch_default from .torchao import dispatch_torchao from .tp_layer import dispatch_megatron def _adapter_names_pre_forward_hook(target, args, kwargs, adapter_names): # pre-forward hook to inject the adapter_names argument when using mixed adapter batches inference kwargs["adapter_names"] = adapter_names return args, kwargs class LoraModel(BaseTuner): """ Creates Low Rank Adapter (LoRA) model from a pretrained transformers model. The method is described in detail in https://arxiv.org/abs/2106.09685. Args: model ([`torch.nn.Module`]): The model to be adapted. config ([`LoraConfig`]): The configuration of the Lora model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the loading process. Returns: `torch.nn.Module`: The Lora model. Example: ```py >>> from transformers import AutoModelForSeq2SeqLM >>> from peft import LoraModel, LoraConfig >>> config = LoraConfig( ... task_type="SEQ_2_SEQ_LM", ... r=8, ... lora_alpha=32, ... target_modules=["q", "v"], ... lora_dropout=0.01, ... ) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> lora_model = LoraModel(model, config, "default") ``` ```py >>> import torch >>> import transformers >>> from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training >>> rank = ... >>> target_modules = ["q_proj", "k_proj", "v_proj", "out_proj", "fc_in", "fc_out", "wte"] >>> config = LoraConfig( ... r=4, lora_alpha=16, target_modules=target_modules, lora_dropout=0.1, bias="none", task_type="CAUSAL_LM" ... ) >>> quantization_config = transformers.BitsAndBytesConfig(load_in_8bit=True) >>> tokenizer = transformers.AutoTokenizer.from_pretrained( ... "kakaobrain/kogpt", ... revision="KoGPT6B-ryan1.5b-float16", # or float32 version: revision=KoGPT6B-ryan1.5b ... 
bos_token="[BOS]", ... eos_token="[EOS]", ... unk_token="[UNK]", ... pad_token="[PAD]", ... mask_token="[MASK]", ... ) >>> model = transformers.GPTJForCausalLM.from_pretrained( ... "kakaobrain/kogpt", ... revision="KoGPT6B-ryan1.5b-float16", # or float32 version: revision=KoGPT6B-ryan1.5b ... pad_token_id=tokenizer.eos_token_id, ... use_cache=False, ... device_map={"": rank}, ... torch_dtype=torch.float16, ... quantization_config=quantization_config, ... ) >>> model = prepare_model_for_kbit_training(model) >>> lora_model = get_peft_model(model, config) ``` **Attributes**: - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted. - **peft_config** ([`LoraConfig`]): The configuration of the Lora model. """ prefix: str = "lora_" def __init__(self, model, config, adapter_name, low_cpu_mem_usage: bool = False) -> None: super().__init__(model, config, adapter_name, low_cpu_mem_usage=low_cpu_mem_usage) def _check_new_adapter_config(self, config: LoraConfig) -> None: """ A helper method to check the config when a new adapter is being added. Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. """ # TODO: there should be a check if any of the existing adapters actually has bias != "none", or else the check # does not fully correspond to the error message. if (len(self.peft_config) > 1) and (config.bias != "none"): raise ValueError( f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, " "set bias to 'none' for all adapters." ) @staticmethod def _check_target_module_exists(lora_config, key): return check_target_module_exists(lora_config, key) def _prepare_model(self, peft_config: LoraConfig, model: nn.Module): r""" A private method to modify the model structure before adapter is applied. Args: peft_config (`PeftConfig`): The prepared adapter config. model (`nn.Module`): The model that is going to be adapted. 
""" if peft_config.layer_replication: replicate_layers(model, peft_config.layer_replication) def _create_and_replace( self, lora_config, adapter_name, target, target_name, parent, current_key, ): if current_key is None: raise ValueError("Current Key shouldn't be `None`") # Regexp matching - Find key which matches current target_name in patterns provided r_key = get_pattern_key(lora_config.rank_pattern.keys(), current_key) alpha_key = get_pattern_key(lora_config.alpha_pattern.keys(), current_key) r = lora_config.rank_pattern.get(r_key, lora_config.r) alpha = lora_config.alpha_pattern.get(alpha_key, lora_config.lora_alpha) kwargs = { "r": r, "lora_alpha": alpha, "lora_dropout": lora_config.lora_dropout, "fan_in_fan_out": lora_config.fan_in_fan_out, "init_lora_weights": lora_config.init_lora_weights, "use_rslora": lora_config.use_rslora, "use_dora": lora_config.use_dora, "ephemeral_gpu_offload": lora_config.runtime_config.ephemeral_gpu_offload, "lora_bias": lora_config.lora_bias, "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False), "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False), } # for torchao merging, we need the get_apply_tensor_subclass from the quantization config try: kwargs["get_apply_tensor_subclass"] = operator.attrgetter( "hf_quantizer.quantization_config.get_apply_tensor_subclass" )(self.model) except AttributeError: pass quant_methods = ["gptq", "aqlm", "awq"] for quant_method in quant_methods: quantization_config = get_quantization_config(self.model, method=quant_method) if quantization_config is not None: kwargs[f"{quant_method}_quantization_config"] = quantization_config # note: AdaLoraLayer is a subclass of LoraLayer, we need to exclude it from peft.tuners.adalora import AdaLoraLayer if isinstance(target, LoraLayer) and not isinstance(target, AdaLoraLayer): target.update_layer( adapter_name, r, lora_alpha=alpha, lora_dropout=lora_config.lora_dropout, init_lora_weights=lora_config.init_lora_weights, use_rslora=lora_config.use_rslora, use_dora=lora_config.use_dora, lora_bias=lora_config.lora_bias, ) else: device_map = self.model.hf_device_map if hasattr(self.model, "hf_device_map") else None new_module = self._create_new_module(lora_config, adapter_name, target, device_map=device_map, **kwargs) if adapter_name not in self.active_adapters: # adding an additional adapter: it is not automatically trainable new_module.requires_grad_(False) self._replace_module(parent, target_name, new_module, target) def _replace_module(self, parent, child_name, new_module, child): setattr(parent, child_name, new_module) # It's not necessary to set requires_grad here, as that is handled by # _mark_only_adapters_as_trainable # child layer wraps the original module, unpack it if hasattr(child, "base_layer"): child = child.base_layer meta = torch.device("meta") # dispatch to correct device for name, module in new_module.named_modules(): if (self.prefix in name) or ("ranknum" in name): if hasattr(child, "qweight"): weight = child.qweight elif hasattr(child, "W_q"): weight = child.W_q elif hasattr(child, "weight"): weight = child.weight elif getattr(child, "in_proj_weight", None) is not None: # MHA weight = child.in_proj_weight else: weight = next(child.parameters()) if not any(p.device == meta for p in module.parameters()): module.to(weight.device) def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None: for n, p in model.named_parameters(): if self.prefix not in n: p.requires_grad = False for active_adapter in self.active_adapters: bias = 
self.peft_config[active_adapter].bias if bias == "none": continue if bias == "all": for n, p in model.named_parameters(): if "bias" in n: p.requires_grad = True elif bias == "lora_only": for m in model.modules(): if isinstance(m, LoraLayer) and hasattr(m, "bias") and m.bias is not None: m.bias.requires_grad = True else: raise NotImplementedError(f"Requested bias: {bias}, is not implemented.") @staticmethod def _create_new_module(lora_config, adapter_name, target, **kwargs): # Collect dispatcher functions to decide what backend to use for the replaced LoRA layer. The order matters, # because the first match is always used. Therefore, the default layers should be checked last. dispatchers = [] if lora_config._custom_modules: # Experimental custom LoRA module support. Allows users to pass a custom mapping for unsupported layer # types by impelementing their own LoRA layers. def dynamic_dispatch_func(target, adapter_name, lora_config, **kwargs): new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target for key, custom_cls in lora_config._custom_modules.items(): if isinstance(target_base_layer, key): new_module = custom_cls(target, adapter_name, **kwargs) break return new_module dispatchers.append(dynamic_dispatch_func) # avoid eager bnb import if is_bnb_available(): from .bnb import dispatch_bnb_8bit dispatchers.append(dispatch_bnb_8bit) if is_bnb_4bit_available(): from .bnb import dispatch_bnb_4bit dispatchers.append(dispatch_bnb_4bit) dispatchers.extend( [ dispatch_eetq, dispatch_aqlm, dispatch_awq, dispatch_gptq, dispatch_hqq, dispatch_torchao, dispatch_megatron, dispatch_default, ] ) new_module = None for dispatcher in dispatchers: new_module = dispatcher(target, adapter_name, lora_config=lora_config, **kwargs) if new_module is not None: # first match wins break if new_module is None: # no module could be matched raise ValueError( f"Target module {target} is not supported. Currently, only the following modules are supported: " "`torch.nn.Linear`, `torch.nn.Embedding`, `torch.nn.Conv1d`, `torch.nn.Conv2d`, `torch.nn.Conv3d`, " "`transformers.pytorch_utils.Conv1D`, `torch.nn.MultiheadAttention.`." ) return new_module def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: if name == "model": # see #1892: prevent infinite recursion if class is not initialized raise return getattr(self.model, name) def get_peft_config_as_dict(self, inference: bool = False): config_dict = {} for key, value in self.peft_config.items(): config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()} if inference: config["inference_mode"] = True config_dict[key] = config return config def _set_adapter_layers(self, enabled: bool = True) -> None: for module in self.model.modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): module.enable_adapters(enabled) def enable_adapter_layers(self) -> None: """Enable all adapters. Call this if you have previously disabled all adapters and want to re-enable them. """ self._set_adapter_layers(enabled=True) def disable_adapter_layers(self) -> None: """Disable all adapters. When disabling all adapters, the model output corresponds to the output of the base model. 
""" for active_adapter in self.active_adapters: val = self.peft_config[active_adapter].bias if val != "none": msg = ( f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same " "output as the the base model would without adaption." ) warnings.warn(msg) self._set_adapter_layers(enabled=False) def set_adapter(self, adapter_name: str | list[str]) -> None: """Set the active adapter(s). Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated. """ for module in self.model.modules(): if isinstance(module, LoraLayer): if module.merged: warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.") module.unmerge() module.set_adapter(adapter_name) self.active_adapter = adapter_name @contextmanager def _enable_peft_forward_hooks(self, *args, **kwargs): # If adapter_names is passed as an argument, we inject it into the forward arguments. adapter_names = kwargs.pop("adapter_names", None) if adapter_names is None: # nothing to do yield return if self.training: raise ValueError("Cannot pass `adapter_names` when the model is in training mode.") # Check that users only passed actually existing adapters. # Note: We cannot do this on the layer level, as each individual layer may not have each adapter. Still, we want # to check that there is at least one layer with the given name, or else something like typos can easily slip. expected_adapters = set() for layer in self.modules(): if isinstance(layer, LoraLayer): expected_adapters |= layer.lora_A.keys() expected_adapters |= layer.lora_embedding_A.keys() unique_adapters = {name for name in adapter_names if name != "__base__"} unexpected_adapters = unique_adapters - expected_adapters if unexpected_adapters: raise ValueError(f"Trying to infer with non-existing adapter(s): {', '.join(sorted(unexpected_adapters))}") # deal with beam search num_beams = kwargs.get("num_beams", None) uses_beam_search = isinstance(num_beams, int) and (num_beams > 1) original_adapter_names = adapter_names[:] if uses_beam_search: if not isinstance(adapter_names, (list, tuple)): raise TypeError(f"Got adapter names of type {type(adapter_names)}, expected a list of str.") # When there is beam search, the inputs are repeated n times, thus we repeat each adapter name n times and # then flatten the nested list. For encoder-decoder models, this extended list should not be applied to the # encoder part. Further below, the original argument is thus restored for the encoder. adapter_names = sum(([n] * kwargs["num_beams"] for n in adapter_names), []) hook_handles = [] for module in self.modules(): if isinstance(module, LoraLayer) or isinstance(module, ModulesToSaveWrapper): pre_forward = partial(_adapter_names_pre_forward_hook, adapter_names=adapter_names) handle = module.register_forward_pre_hook(pre_forward, with_kwargs=True) hook_handles.append(handle) if uses_beam_search and hasattr(self.model, "get_encoder"): # For encoder-decoder models, even when applying beam search, the encoder part of the model should not use # the extended adapter_names. This is because the encoder still uses the original, non-extended samples. 
for module in self.model.get_encoder().modules(): if isinstance(module, LoraLayer) or isinstance(module, ModulesToSaveWrapper): # Add another hook to overwrite the kwargs with the original adapter names -- this is easier than # trying to exclude the encoder. pre_forward = partial(_adapter_names_pre_forward_hook, adapter_names=original_adapter_names) handle = module.register_forward_pre_hook(pre_forward, with_kwargs=True) hook_handles.append(handle) yield for handle in hook_handles: handle.remove() def _check_merge_allowed(self): """Verify that the configuration supports merging. Currently gptq quantization and replicated layers do not support merging. """ super()._check_merge_allowed() if getattr(self.model, "quantization_method", None) == "gptq": raise ValueError("Cannot merge LORA layers when the model is gptq quantized") if self.peft_config.get("layer_replication"): raise ValueError("Cannot merge LORA layers when base model layers are replicated") @staticmethod def _prepare_adapter_config(peft_config, model_config): if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = set( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]] ) return peft_config def _unload_and_optionally_merge( self, merge=True, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None, ): if merge: self._check_merge_allowed() key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] desc = "Unloading " + ("and merging " if merge else "") + "model" for key in tqdm(key_list, disable=not progressbar, desc=desc): try: parent, target, target_name = _get_submodules(self.model, key) except AttributeError: continue with onload_layer(target): if hasattr(target, "unload_and_optionally_merge_module"): # if layers have special unloading method, like MultiheadAttention, use that unloaded_module = target.unload_and_optionally_merge_module( merge=merge, safe_merge=safe_merge, adapter_names=adapter_names ) self._replace_module(parent, target_name, unloaded_module, target) elif hasattr(target, "base_layer"): if merge: target.merge(safe_merge=safe_merge, adapter_names=adapter_names) self._replace_module(parent, target_name, target.get_base_layer(), target) elif isinstance(target, ModulesToSaveWrapper): # save any additional trainable modules part of `modules_to_save` new_module = target.modules_to_save[target.active_adapter] if hasattr(new_module, "base_layer"): # check if the module is itself a tuner layer if merge: new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names) new_module = new_module.get_base_layer() setattr(parent, target_name, new_module) return self.model def _check_add_weighted_adapter( self, adapters: list[str], combination_type: str, svd_rank: int | None ) -> tuple[str, int, str]: """ Helper function to check if the arguments to add_weighted_adapter are valid and compatible with the underlying model. """ for adapter in adapters: if adapter not in list(self.peft_config.keys()): raise ValueError(f"Adapter {adapter} does not exist") # If more than one of the adapters targets the same module with modules_to_save, raise an error, as these # modules cannot be merged. First, find the ModulesToSaveWrapper instances in the model, then check if they # have modules for the adapters to be merged. 
modules_to_save_wrappers = [module for module in self.modules() if isinstance(module, ModulesToSaveWrapper)] problematic_wrappers = [ wrapper for wrapper in modules_to_save_wrappers if sum(adapter in wrapper.modules_to_save for adapter in adapters) > 1 ] if problematic_wrappers: raise ValueError( "Cannot add weighted adapters if they target the same module with modules_to_save, but found " f"{len(problematic_wrappers)} such instance(s)." ) # if there is only one adapter, we can only use linear merging combination_type = "linear" if len(adapters) == 1 else combination_type adapters_ranks = [self.peft_config[adapter].r for adapter in adapters] if combination_type in ("linear", "ties", "dare_ties", "dare_linear", "magnitude_prune"): # all adapters ranks should be same, new rank is just this value if len(set(adapters_ranks)) != 1: raise ValueError( "All adapters must have the same r value when using combination_type linear, ties, dare_ties or " "dare_linear." ) new_rank = adapters_ranks[0] elif combination_type == "cat": # adapters ranks may be different, new rank is sum of all ranks # be careful, because output adapter rank may be really big if mixing a lot of adapters new_rank = sum(adapters_ranks) elif combination_type.endswith("svd"): # new rank is the max of all ranks of the adapters if not provided new_rank = svd_rank or max(adapters_ranks) else: raise ValueError(f"Invalid combination_type: {combination_type}") target_module_types = [type(self.peft_config[adapter].target_modules) for adapter in adapters] if not target_module_types: raise ValueError(f"Found no adapter matching the names in {adapters}") if len(set(target_module_types)) > 1: raise ValueError( "all adapter configs should follow the same target modules type. " "Combining adapters with `target_modules` type being a mix of list/set and string is not supported." ) if target_module_types[0] is str: new_target_modules = "|".join(f"({self.peft_config[adapter].target_modules})" for adapter in adapters) elif target_module_types[0] is set: new_target_modules = reduce( operator.or_, (self.peft_config[adapter].target_modules for adapter in adapters) ) else: raise TypeError(f"Invalid type {target_module_types[0]} found in target_modules") return combination_type, new_rank, new_target_modules def add_weighted_adapter( self, adapters: list[str], weights: list[float], adapter_name: str, combination_type: str = "svd", svd_rank: int | None = None, svd_clamp: int | None = None, svd_full_matrices: bool = True, svd_driver: str | None = None, density: float | None = None, majority_sign_method: Literal["total", "frequency"] = "total", ) -> None: """ This method adds a new adapter by merging the given adapters with the given weights. When using the `cat` combination_type you should be aware that rank of the resulting adapter will be equal to the sum of all adapters ranks. So it's possible that the mixed adapter may become too big and result in OOM errors. Args: adapters (`list`): List of adapter names to be merged. weights (`list`): List of weights for each adapter. adapter_name (`str`): Name of the new adapter. combination_type (`str`): The merging type can be one of [`svd`, `linear`, `cat`, `ties`, `ties_svd`, `dare_ties`, `dare_linear`, `dare_ties_svd`, `dare_linear_svd`, `magnitude_prune`, `magnitude_prune_svd`]. When using the `cat` combination_type, the rank of the resulting adapter is equal to the sum of all adapters ranks (the mixed adapter may be too big and result in OOM errors). 
svd_rank (`int`, *optional*): Rank of output adapter for svd. If None provided, will use max rank of merging adapters. svd_clamp (`float`, *optional*): A quantile threshold for clamping SVD decomposition output. If None is provided, do not perform clamping. Defaults to None. svd_full_matrices (`bool`, *optional*): Controls whether to compute the full or reduced SVD, and consequently, the shape of the returned tensors U and Vh. Defaults to True. svd_driver (`str`, *optional*): Name of the cuSOLVER method to be used. This keyword argument only works when merging on CUDA. Can be one of [None, `gesvd`, `gesvdj`, `gesvda`]. For more info please refer to `torch.linalg.svd` documentation. Defaults to None. density (`float`, *optional*): Value between 0 and 1. 0 means all values are pruned and 1 means no values are pruned. Should be used with [`ties`, `ties_svd`, `dare_ties`, `dare_linear`, `dare_ties_svd`, `dare_linear_svd`, `magnintude_prune`, `magnitude_prune_svd`] majority_sign_method (`str`): The method, should be one of ["total", "frequency"], to use to get the magnitude of the sign values. Should be used with [`ties`, `ties_svd`, `dare_ties`, `dare_ties_svd`] """ if adapter_name in list(self.peft_config.keys()): return combination_type, new_rank, new_target_modules = self._check_add_weighted_adapter( adapters=adapters, combination_type=combination_type, svd_rank=svd_rank, ) self.peft_config[adapter_name] = replace( self.peft_config[adapters[0]], r=new_rank, lora_alpha=new_rank, target_modules=new_target_modules, ) self.inject_adapter(self.model, adapter_name) # Do we really need that? _freeze_adapter(self.model, adapter_name) key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, LoraLayer): if adapter_name in target.lora_A: target_lora_A = target.lora_A[adapter_name].weight target_lora_B = target.lora_B[adapter_name].weight elif adapter_name in target.lora_embedding_A: target_lora_A = target.lora_embedding_A[adapter_name] target_lora_B = target.lora_embedding_B[adapter_name] else: continue target_lora_A.data = target_lora_A.data * 0.0 target_lora_B.data = target_lora_B.data * 0.0 if combination_type == "cat": loras_A, loras_B = [], [] for adapter, weight in zip(adapters, weights): if adapter in target.lora_A: current_adapter_lora_A = target.lora_A[adapter].weight current_adapter_lora_B = target.lora_B[adapter].weight elif adapter in target.lora_embedding_A: current_adapter_lora_A = target.lora_embedding_A[adapter] current_adapter_lora_B = target.lora_embedding_B[adapter] else: continue loras_A.append(current_adapter_lora_A.data * weight * target.scaling[adapter]) loras_B.append(current_adapter_lora_B.data) if len(loras_A) == 0: raise ValueError("No matching LoRAs found. 
Please raise an issue on GitHub.") loras_A = torch.cat(loras_A, dim=0) loras_B = torch.cat(loras_B, dim=1) target_lora_A.data[: loras_A.shape[0], :] = loras_A target_lora_B.data[:, : loras_B.shape[1]] = loras_B elif combination_type in [ "svd", "ties_svd", "dare_linear_svd", "dare_ties_svd", "magnitude_prune_svd", ]: target_lora_A.data, target_lora_B.data = self._svd_generalized_task_arithmetic_weighted_adapter( combination_type, adapters, weights, new_rank, target, target_lora_A, target_lora_B, density, majority_sign_method, svd_clamp, full_matrices=svd_full_matrices, driver=svd_driver, ) elif combination_type in ["linear", "ties", "dare_linear", "dare_ties", "magnitude_prune"]: target_lora_A.data, target_lora_B.data = self._generalized_task_arithmetic_weighted_adapter( combination_type, adapters, weights, target, density, majority_sign_method ) def _svd_generalized_task_arithmetic_weighted_adapter( self, combination_type, adapters, weights, new_rank, target, target_lora_A, target_lora_B, density, majority_sign_method, clamp=None, full_matrices=True, driver=None, ): valid_adapters = [] valid_weights = [] is_embedding = any(adapter in target.lora_embedding_A for adapter in adapters) for adapter, weight in zip(adapters, weights): if adapter in target.lora_A or adapter in target.lora_embedding_A: valid_adapters.append(adapter) valid_weights.append(weight * target.scaling[adapter]) # if no valid adapter, nothing to do if len(valid_adapters) == 0: raise ValueError("No matching LoRAs found. Please raise an issue on Github.") delta_weight = [target.get_delta_weight(adapter) for adapter in valid_adapters] valid_weights = torch.tensor(valid_weights).to(delta_weight[0].device) if combination_type == "svd": delta_weight = task_arithmetic(delta_weight, valid_weights) elif combination_type == "ties_svd": delta_weight = ties(delta_weight, valid_weights, density, majority_sign_method) elif combination_type == "dare_linear_svd": delta_weight = dare_linear(delta_weight, valid_weights, density) elif combination_type == "dare_ties_svd": delta_weight = dare_ties(delta_weight, valid_weights, density, majority_sign_method) elif combination_type == "magnitude_prune_svd": delta_weight = magnitude_prune(delta_weight, valid_weights, density) else: raise ValueError(f"Invalid value passed to combination type: {combination_type}") conv2d = isinstance(target, Conv2d) if conv2d: conv2d_1x1 = target.weight.size()[2:4] == (1, 1) if not conv2d_1x1: delta_weight = delta_weight.flatten(start_dim=1) else: delta_weight = delta_weight.squeeze() if (hasattr(target, "fan_in_fan_out") and target.fan_in_fan_out) or is_embedding: delta_weight = delta_weight.T # based on https://github.com/kohya-ss/sd-scripts/blob/main/networks/svd_merge_lora.py#L114-L131 U, S, Vh = torch.linalg.svd(delta_weight, full_matrices=full_matrices, driver=driver) U = U[:, :new_rank] S = S[:new_rank] U = U @ torch.diag(S) Vh = Vh[:new_rank, :] if clamp is not None: dist = torch.cat([U.flatten(), Vh.flatten()]) hi_val = torch.quantile(dist, clamp) low_val = -hi_val U = U.clamp(low_val, hi_val) Vh = Vh.clamp(low_val, hi_val) if conv2d: U = U.reshape(target_lora_B.data.shape) Vh = Vh.reshape(target_lora_A.data.shape) return Vh, U def _generalized_task_arithmetic_weighted_adapter( self, combination_type, adapters, weights, target, density, majority_sign_method, ): # account weights for LoRA A and B layers. 
valid_weights = [] lora_A_deltas = [] lora_B_deltas = [] for adapter, weight in zip(adapters, weights): if adapter in target.lora_A: current_adapter_lora_A = target.lora_A[adapter].weight current_adapter_lora_B = target.lora_B[adapter].weight elif adapter in target.lora_embedding_A: current_adapter_lora_A = target.lora_embedding_A[adapter] current_adapter_lora_B = target.lora_embedding_B[adapter] else: continue valid_weights.append(math.sqrt(weight * target.scaling[adapter])) lora_A_deltas.append(current_adapter_lora_A.data) lora_B_deltas.append(current_adapter_lora_B.data) valid_weights = torch.tensor(valid_weights).to(lora_A_deltas[0].device) lora_deltas = [lora_A_deltas, lora_B_deltas] dtype = lora_A_deltas[0].dtype for i, task_tensors in enumerate(lora_deltas): if combination_type == "linear": lora_deltas[i] = task_arithmetic(task_tensors, valid_weights) elif combination_type == "ties": lora_deltas[i] = ties(task_tensors, valid_weights, density, majority_sign_method) elif combination_type == "dare_linear": lora_deltas[i] = dare_linear(task_tensors, valid_weights, density) elif combination_type == "dare_ties": lora_deltas[i] = dare_ties(task_tensors, valid_weights, density, majority_sign_method) elif combination_type == "magnitude_prune": lora_deltas[i] = magnitude_prune(task_tensors, valid_weights, density) else: raise ValueError("Invalid combination type") lora_deltas = [delta.to(dtype) for delta in lora_deltas] return lora_deltas def delete_adapter(self, adapter_name: str) -> None: """ Deletes an existing adapter. Args: adapter_name (str): Name of the adapter to be deleted. """ if adapter_name not in list(self.peft_config.keys()): raise ValueError(f"Adapter {adapter_name} does not exist") del self.peft_config[adapter_name] key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] new_adapter = None for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, LoraLayer): target.delete_adapter(adapter_name) if new_adapter is None: new_adapter = target.active_adapters[:] self.active_adapter = new_adapter or [] def merge_and_unload( self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None ) -> torch.nn.Module: r""" This method merges the LoRa layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: progressbar (`bool`): whether to show a progressbar indicating the unload and merge process safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. Example: ```py >>> from transformers import AutoModelForCausalLM >>> from peft import PeftModel >>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b") >>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample" >>> model = PeftModel.from_pretrained(base_model, peft_model_id) >>> merged_model = model.merge_and_unload() ``` """ return self._unload_and_optionally_merge( progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names ) def unload(self) -> torch.nn.Module: """ Gets back the base model by removing all the lora modules without merging. This gives back the original base model. 
""" return self._unload_and_optionally_merge(merge=False) def subtract_mutated_init(self, output_state_dict: dict[str, torch.Tensor], adapter_name: str, kwargs=None): """ This function can calculate the updates of the PiSSA/CorDA/OLoRA by comparing the parameters of the PiSSA/CorDA/OLoRA adapter in `output_state_dict` with the initial values of PiSSA/CorDA/OLoRA in `adapter_name`, thus converting PiSSA/CorDA/OLoRA to LoRA. """ for name, param in self.model.named_parameters(): if ( param.data.dtype != torch.float32 and param.data.dtype != torch.float16 and param.data.dtype != torch.bfloat16 ) and adapter_name.startswith("pissa"): warnings.warn( r"Note that Quant(W_res) + AB != Quant(W) + \Delta(AB); " "the converted LoRA, when combined with W or Quant(W), may introduce a certain gap in the fine-tuned model. " "Therefore, we recommend directly using the Quant(W_res) in conjunction with the PiSSA adapter. " ) mutated_init_state_dict = get_peft_model_state_dict( self, state_dict=kwargs.get("state_dict", None), adapter_name=adapter_name, ) tensors_lora = {} for name in output_state_dict.keys(): ## W = W^{res} + A_0 \times B_0, ## W + \Delta W = W^{res} + A \times B, ## \Delta W = A \times B - A_0 \times B_0 = [A | A_0] \times [B | -B_0]^T = A'B'. if "lora_A" in name: tensors_lora[name] = torch.cat( [output_state_dict[name], mutated_init_state_dict[".".join(name.split(".")[1:])]], dim=0 ) elif "lora_B" in name: tensors_lora[name] = torch.cat( [output_state_dict[name], -mutated_init_state_dict[".".join(name.split(".")[1:])]], dim=1 ) return tensors_lora
peft/src/peft/tuners/lora/model.py/0
{ "file_path": "peft/src/peft/tuners/lora/model.py", "repo_id": "peft", "token_count": 20051 }
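The `add_weighted_adapter` API documented above is easiest to see from the caller's side. Below is a minimal, hedged sketch of merging two LoRA adapters with the TIES combination; the repo ids (`base-model`, `adapter-a`, `adapter-b`) are placeholders, and the adapters are assumed to share the same rank, as the docstring requires for non-SVD combination types.

```py
from transformers import AutoModelForCausalLM
from peft import PeftModel

# Load the base model and attach two already-trained LoRA adapters (placeholder ids).
base = AutoModelForCausalLM.from_pretrained("base-model")
model = PeftModel.from_pretrained(base, "adapter-a", adapter_name="adapter_a")
model.load_adapter("adapter-b", adapter_name="adapter_b")

# Merge them into a new adapter; "ties" needs a density and a common rank across adapters.
model.add_weighted_adapter(
    adapters=["adapter_a", "adapter_b"],
    weights=[0.7, 0.3],
    adapter_name="merged",
    combination_type="ties",
    density=0.2,
)
model.set_adapter("merged")
```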
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from peft.import_utils import is_bnb_4bit_available, is_bnb_available from peft.utils import register_peft_method from .config import VeraConfig from .layer import Linear, VeraLayer from .model import VeraModel __all__ = ["Linear", "VeraConfig", "VeraLayer", "VeraModel"] register_peft_method(name="vera", config_cls=VeraConfig, model_cls=VeraModel, prefix="vera_lambda_") def __getattr__(name): if (name == "Linear8bitLt") and is_bnb_available(): from .bnb import Linear8bitLt return Linear8bitLt if (name == "Linear4bit") and is_bnb_4bit_available(): from .bnb import Linear4bit return Linear4bit raise AttributeError(f"module {__name__} has no attribute {name}")
peft/src/peft/tuners/vera/__init__.py/0
{ "file_path": "peft/src/peft/tuners/vera/__init__.py", "repo_id": "peft", "token_count": 419 }
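For context, the `register_peft_method` call above is what wires VeRA into the standard PEFT entry points. A small usage sketch, assuming a causal LM whose attention projections are named `q_proj`/`v_proj` (the model id is a placeholder):

```py
from transformers import AutoModelForCausalLM
from peft import VeraConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained("base-model")  # placeholder id
# VeRA shares frozen random projections across layers, so the targeted modules
# should have compatible shapes.
config = VeraConfig(r=256, target_modules=["q_proj", "v_proj"])
model = get_peft_model(base, config)
model.print_trainable_parameters()
```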
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List, Literal import torch def reshape_weight_task_tensors(task_tensors, weights): """ Reshapes `weights` to match the shape of `task_tensors` by unsqeezing in the remaining dimenions. Args: task_tensors (`torch.Tensor`): The tensors that will be used to reshape `weights`. weights (`torch.Tensor`): The tensor to be reshaped. Returns: `torch.Tensor`: The reshaped tensor. """ new_shape = weights.shape + (1,) * (task_tensors.dim() - weights.dim()) weights = weights.view(new_shape) return weights def magnitude_based_pruning(tensor: torch.Tensor, density: float) -> torch.Tensor: """ Prune the smallest values of the task tensors and retain the top-k values based on the specified fraction `density`. Args: tensor (`torch.Tensor`):The tensor to prune. density (`float`):The fraction of values to preserve. Should be in [0,1]. Returns: `torch.Tensor`: The tensor with the pruned weights. """ mask = torch.zeros_like(tensor).reshape(-1) k = int(density * tensor.numel()) top_k = torch.topk(tensor.abs().reshape(-1), k=k, largest=True) mask[top_k[1]] = 1 return tensor * mask.reshape(tensor.shape) def random_pruning(tensor: torch.Tensor, density: float, rescale: bool) -> torch.Tensor: """ Prune random values based on the specified fraction `density`. Args: tensor (`torch.Tensor`):The tensor to prune. density (`float`):The fraction of values to preserve. Should be in [0,1]. rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor. Returns: `torch.Tensor`: The pruned tensor. """ mask = torch.bernoulli(torch.full_like(input=tensor, fill_value=density)) pruned_tensor = tensor * mask if rescale: torch.div(input=pruned_tensor, other=density) return pruned_tensor def prune( tensor: torch.Tensor, density: float, method: Literal["magnitude", "random"], rescale: bool = False ) -> torch.Tensor: """ Prune the values of task tensors based on the `method`. Args: tensor (`torch.Tensor`):The tensor to prune. density (`float`):The fraction of values to preserve. Should be in [0,1]. method (`str`):The method to use to prune. Should be one of ["magnitude", "random"]. rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor. Returns: `torch.Tensor`: The pruned tensor. """ if density >= 1: warnings.warn(f"The density {density} is greater than or equal to 1, no pruning will be performed.") return tensor elif density < 0: raise ValueError(f"Density should be >= 0, got {density}") if method == "magnitude": return magnitude_based_pruning(tensor, density) elif method == "random": return random_pruning(tensor, density, rescale=rescale) else: raise ValueError(f"Unknown method {method}") def calculate_majority_sign_mask( tensor: torch.Tensor, method: Literal["total", "frequency"] = "total" ) -> torch.Tensor: """ Get the mask of the majority sign across the task tensors. Task tensors are stacked on dimension 0. 
Args: tensor (`torch.Tensor`):The tensor to get the mask from. method (`str`):The method to use to get the mask. Should be one of ["total", "frequency"]. Returns: `torch.Tensor`: The majority sign mask. """ sign = tensor.sign() if method == "total": sign_magnitude = tensor.sum(dim=0) elif method == "frequency": sign_magnitude = sign.sum(dim=0) else: raise RuntimeError(f'Unimplemented mask method "{method}"') majority_sign = torch.where(sign_magnitude >= 0, 1, -1) return sign == majority_sign def disjoint_merge(task_tensors: torch.Tensor, majority_sign_mask: torch.Tensor) -> torch.Tensor: """ Merge the task tensors using disjoint merge. Args: task_tensors (`torch.Tensor`):The task tensors to merge. majority_sign_mask (`torch.Tensor`):The mask of the majority sign across the task tensors. Returns: `torch.Tensor`: The merged tensor. """ mixed_task_tensors = (task_tensors * majority_sign_mask).sum(dim=0) num_params_preserved = majority_sign_mask.sum(dim=0) return mixed_task_tensors / torch.clamp(num_params_preserved, min=1.0) def task_arithmetic(task_tensors: List[torch.Tensor], weights: torch.Tensor) -> torch.Tensor: """ Merge the task tensors using `task arithmetic`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. Returns: `torch.Tensor`: The merged tensor. """ task_tensors = torch.stack(task_tensors, dim=0) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights mixed_task_tensors = weighted_task_tensors.sum(dim=0) return mixed_task_tensors def magnitude_prune(task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float) -> torch.Tensor: """ Merge the task tensors using `task arithmetic`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`): The fraction of values to preserve. Should be in [0,1]. Returns: `torch.Tensor`: The merged tensor. """ # sparsify task_tensors = [prune(tensor, density, method="magnitude") for tensor in task_tensors] task_tensors = torch.stack(task_tensors, dim=0) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights mixed_task_tensors = weighted_task_tensors.sum(dim=0) return mixed_task_tensors def ties( task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float, majority_sign_method: Literal["total", "frequency"] = "total", ) -> torch.Tensor: """ Merge the task tensors using `ties`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`):The fraction of values to preserve. Should be in [0,1]. majority_sign_method (`str`): The method to use to get the majority sign mask. Should be one of ["total", "frequency"]. Returns: `torch.Tensor`: The merged tensor. 
""" # sparsify task_tensors = [prune(tensor, density, method="magnitude") for tensor in task_tensors] task_tensors = torch.stack(task_tensors, dim=0) # Elect Sign majority_sign_mask = calculate_majority_sign_mask(task_tensors, method=majority_sign_method) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights # Disjoint Merge mixed_task_tensors = disjoint_merge(weighted_task_tensors, majority_sign_mask) return mixed_task_tensors def dare_linear(task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float) -> torch.Tensor: """ Merge the task tensors using `dare linear`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`):The fraction of values to preserve. Should be in [0,1]. Returns: `torch.Tensor`: The merged tensor. """ # sparsify task_tensors = [prune(tensor, density, method="random", rescale=True) for tensor in task_tensors] task_tensors = torch.stack(task_tensors, dim=0) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights mixed_task_tensors = weighted_task_tensors.sum(dim=0) return mixed_task_tensors def dare_ties( task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float, majority_sign_method: Literal["total", "frequency"] = "total", ) -> torch.Tensor: """ Merge the task tensors using `dare ties`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`):The fraction of values to preserve. Should be in [0,1]. majority_sign_method (`str`): The method to use to get the majority sign mask. Should be one of ["total", "frequency"]. Returns: `torch.Tensor`: The merged tensor. """ # sparsify task_tensors = [prune(tensor, density, method="random", rescale=True) for tensor in task_tensors] task_tensors = torch.stack(task_tensors, dim=0) # Elect Sign majority_sign_mask = calculate_majority_sign_mask(task_tensors, method=majority_sign_method) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights # Disjoint Merge mixed_task_tensors = disjoint_merge(weighted_task_tensors, majority_sign_mask) return mixed_task_tensors
peft/src/peft/utils/merge_utils.py/0
{ "file_path": "peft/src/peft/utils/merge_utils.py", "repo_id": "peft", "token_count": 3819 }
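Because the merge helpers above operate on plain tensors, they can be exercised in isolation. A small sketch with toy per-task weight deltas (shapes and values are arbitrary):

```py
import torch

from peft.utils.merge_utils import dare_linear, task_arithmetic, ties

deltas = [torch.randn(4, 4) for _ in range(3)]  # stand-ins for per-task weight deltas
weights = torch.tensor([1.0, 0.5, 0.5])         # per-task scaling factors

merged_linear = task_arithmetic(deltas, weights)         # plain weighted sum
merged_ties = ties(deltas, weights, density=0.5)         # magnitude prune + sign election
merged_dare = dare_linear(deltas, weights, density=0.5)  # random drop + rescale
print(merged_linear.shape, merged_ties.shape, merged_dare.shape)  # all torch.Size([4, 4])
```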
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import torch from parameterized import parameterized from transformers import AutoModelForSeq2SeqLM, AutoModelForTokenClassification from peft import LoraConfig, PromptEncoderConfig, TaskType, get_peft_model from .testing_common import PeftCommonTester, PeftTestConfigManager PEFT_ENCODER_DECODER_MODELS_TO_TEST = [ "ybelkada/tiny-random-T5ForConditionalGeneration-calibrated", "hf-internal-testing/tiny-random-BartForConditionalGeneration", ] FULL_GRID = {"model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST, "task_type": "SEQ_2_SEQ_LM"} class PeftEncoderDecoderModelTester(unittest.TestCase, PeftCommonTester): r""" Test if the PeftModel behaves as expected. This includes: - test if the model has the expected methods We use parametrized.expand for debugging purposes to test each model individually. """ transformers_class = AutoModelForSeq2SeqLM def prepare_inputs_for_testing(self): input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device) decoder_input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device) attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) input_dict = { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, } return input_dict @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs): self._test_model_attr(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_adapter_name(self, test_name, model_id, config_cls, config_kwargs): self._test_adapter_name(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs): self._test_prepare_for_training(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs): self._test_save_pretrained(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_save_pretrained_pickle(self, test_name, model_id, config_cls, config_kwargs): self._test_save_pretrained(model_id, config_cls, config_kwargs, safe_serialization=False) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_save_pretrained_selected_adapters(self, test_name, model_id, config_cls, config_kwargs): self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_save_pretrained_selected_adapters_pickle(self, test_name, model_id, config_cls, config_kwargs): self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs, 
safe_serialization=False) def test_load_model_low_cpu_mem_usage(self): self._test_load_model_low_cpu_mem_usage(PEFT_ENCODER_DECODER_MODELS_TO_TEST[0], LoraConfig, {}) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_from_pretrained_config_construction(self, test_name, model_id, config_cls, config_kwargs): self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "adalora_kwargs": {"init_lora_weights": [False]}, "ia3_kwargs": {"init_ia3_weights": [False]}, "vera_kwargs": {"init_weights": [False]}, "hra_kwargs": {"init_weights": [False]}, "bone_kwargs": {"init_weights": [False]}, "task_type": "SEQ_2_SEQ_LM", }, ) ) def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs): self._test_merge_layers(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "task_type": "SEQ_2_SEQ_LM", }, ) ) def test_mixed_adapter_batches(self, test_name, model_id, config_cls, config_kwargs): self._test_mixed_adapter_batches(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "task_type": "SEQ_2_SEQ_LM", }, ) ) def test_generate_with_mixed_adapter_batches(self, test_name, model_id, config_cls, config_kwargs): self._test_generate_with_mixed_adapter_batches_and_beam_search(model_id, config_cls, config_kwargs) # skip non lora models - generate does not work for prefix tuning, prompt tuning @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_generate(self, test_name, model_id, config_cls, config_kwargs): self._test_generate(model_id, config_cls, config_kwargs) # skip non lora models - generate does not work for prefix tuning, prompt tuning @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_generate_pos_args(self, test_name, model_id, config_cls, config_kwargs): # positional arguments are not supported for PeftModelForSeq2SeqLM self._test_generate_pos_args(model_id, config_cls, config_kwargs, raises_err=True) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_generate_half_prec(self, test_name, model_id, config_cls, config_kwargs): self._test_generate_half_prec(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_prefix_tuning_half_prec_conversion(self, test_name, model_id, config_cls, config_kwargs): self._test_prefix_tuning_half_prec_conversion(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_training_encoder_decoders(self, test_name, model_id, config_cls, config_kwargs): self._test_training(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_training_encoder_decoders_layer_indexing(self, test_name, model_id, config_cls, config_kwargs): self._test_training_layer_indexing(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_training_encoder_decoders_gradient_checkpointing(self, test_name, model_id, config_cls, 
config_kwargs): self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_inference_safetensors(self, test_name, model_id, config_cls, config_kwargs): self._test_inference_safetensors(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_peft_model_device_map(self, test_name, model_id, config_cls, config_kwargs): self._test_peft_model_device_map(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_delete_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_delete_adapter(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_delete_inactive_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_delete_inactive_adapter(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_adding_multiple_adapters_with_bias_raises(self, test_name, model_id, config_cls, config_kwargs): self._test_adding_multiple_adapters_with_bias_raises(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "adalora_kwargs": {"init_lora_weights": [False]}, "ia3_kwargs": {"init_ia3_weights": [False]}, "boft_kwargs": {"init_weights": [False]}, "oft_kwargs": {"init_weights": [False]}, "vera_kwargs": {"init_weights": [False]}, "hra_kwargs": {"init_weights": [False]}, "bone_kwargs": {"init_weights": [False]}, "task_type": "SEQ_2_SEQ_LM", }, ) ) def test_unload_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_unload_adapter(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "ia3_kwargs": {"init_ia3_weights": [False]}, "bone_kwargs": {"init_weights": [False]}, "task_type": "SEQ_2_SEQ_LM", }, ) ) def test_weighted_combination_of_adapters(self, test_name, model_id, config_cls, config_kwargs): self._test_weighted_combination_of_adapters(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_training_prompt_learning_tasks(self, test_name, model_id, config_cls, config_kwargs): self._test_training_prompt_learning_tasks(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "adalora_kwargs": {"init_lora_weights": [False]}, "ia3_kwargs": {"init_ia3_weights": [False]}, "boft_kwargs": {"init_weights": [False]}, "oft_kwargs": {"init_weights": [False]}, "vera_kwargs": {"init_weights": [False]}, "hra_kwargs": {"init_weights": [False]}, "bone_kwargs": {"init_weights": [False]}, "task_type": "SEQ_2_SEQ_LM", }, ) ) def test_disable_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_disable_adapter(model_id, config_cls, config_kwargs) def test_active_adapters_prompt_learning(self): # see issue https://github.com/huggingface/transformers/pull/30790#issuecomment-2253808249 model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration") # any 
prompt learning method would work here
        config = PromptEncoderConfig(task_type=TaskType.SEQ_2_SEQ_LM, num_virtual_tokens=10)
        model = get_peft_model(model, config)
        assert model.active_adapters == ["default"]


class PeftEncoderDecoderCustomModelTester(unittest.TestCase):
    """
    A custom class to write any custom test related to Enc-Dec models
    """

    def test_save_shared_tensors(self):
        model_id = "hf-internal-testing/tiny-random-RobertaModel"
        peft_config = LoraConfig(
            task_type=TaskType.TOKEN_CLS, inference_mode=False, r=16, lora_alpha=16, lora_dropout=0.1, bias="all"
        )
        model = AutoModelForTokenClassification.from_pretrained(model_id, num_labels=11)
        model = get_peft_model(model, peft_config)
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This should work fine
            model.save_pretrained(tmp_dir, safe_serialization=True)

peft/tests/test_encoder_decoder_models.py/0
{ "file_path": "peft/tests/test_encoder_decoder_models.py", "repo_id": "peft", "token_count": 5682 }
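The parameterized tests above all reduce to the same basic setup: wrap a tiny Seq2Seq checkpoint with a PEFT config and run dummy batches through it. A hedged sketch of that setup outside the test harness, reusing the tiny BART checkpoint and the dummy inputs from `prepare_inputs_for_testing`:

```py
import torch
from transformers import AutoModelForSeq2SeqLM

from peft import LoraConfig, TaskType, get_peft_model

base = AutoModelForSeq2SeqLM.from_pretrained(
    "hf-internal-testing/tiny-random-BartForConditionalGeneration"
)
model = get_peft_model(base, LoraConfig(task_type=TaskType.SEQ_2_SEQ_LM, r=8))

batch = {
    "input_ids": torch.tensor([[1, 1, 1], [1, 2, 1]]),
    "decoder_input_ids": torch.tensor([[1, 1, 1], [1, 2, 1]]),
    "attention_mask": torch.tensor([[1, 1, 1], [1, 0, 1]]),
}
print(model(**batch).logits.shape)  # (batch, seq_len, vocab_size)
```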
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest import torch from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from peft import PeftModel, PolyConfig, TaskType, get_peft_model class TestPoly(unittest.TestCase): def test_poly(self): torch.manual_seed(0) model_name_or_path = "google/flan-t5-small" atol, rtol = 1e-6, 1e-6 r = 8 # rank of lora in poly n_tasks = 3 # number of tasks n_skills = 2 # number of skills (loras) n_splits = 4 # number of heads lr = 1e-2 num_epochs = 10 tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) base_model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) peft_config = PolyConfig( task_type=TaskType.SEQ_2_SEQ_LM, poly_type="poly", r=r, n_tasks=n_tasks, n_skills=n_skills, n_splits=n_splits, ) model = get_peft_model(base_model, peft_config) # generate some dummy data text = os.__doc__.splitlines() assert len(text) > 10 inputs = tokenizer(text, return_tensors="pt", padding=True) inputs["task_ids"] = torch.arange(len(text)) % n_tasks inputs["labels"] = tokenizer((["A", "B"] * 100)[: len(text)], return_tensors="pt")["input_ids"] # simple training loop model.train() optimizer = torch.optim.Adam(model.parameters(), lr=lr) losses = [] for _ in range(num_epochs): outputs = model(**inputs) loss = outputs.loss loss.backward() optimizer.step() optimizer.zero_grad() losses.append(loss.item()) # loss improved by at least 50% assert losses[-1] < (0.5 * losses[0]) # check that saving and loading works torch.manual_seed(0) model.eval() logits_before = model(**inputs).logits tokens_before = model.generate(**inputs) with model.disable_adapter(): logits_disabled = model(**inputs).logits tokens_disabled = model.generate(**inputs) assert not torch.allclose(logits_before, logits_disabled, atol=atol, rtol=rtol) assert not torch.allclose(tokens_before, tokens_disabled, atol=atol, rtol=rtol) # saving and loading with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) base_model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) loaded = PeftModel.from_pretrained(base_model, tmp_dir) torch.manual_seed(0) output_after = loaded(**inputs).logits tokens_after = loaded.generate(**inputs) assert torch.allclose(logits_before, output_after, atol=atol, rtol=rtol) assert torch.allclose(tokens_before, tokens_after, atol=atol, rtol=rtol)
peft/tests/test_poly.py/0
{ "file_path": "peft/tests/test_poly.py", "repo_id": "peft", "token_count": 1541 }
message: "If you use this software, please cite it as below." title: "PyTorch Image Models" version: "1.2.2" doi: "10.5281/zenodo.4414861" authors: - family-names: Wightman given-names: Ross version: 1.0.11 year: "2019" url: "https://github.com/huggingface/pytorch-image-models" license: "Apache 2.0"
pytorch-image-models/CITATION.cff/0
{ "file_path": "pytorch-image-models/CITATION.cff", "repo_id": "pytorch-image-models", "token_count": 122 }
# EfficientNet (Knapsack Pruned) **EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way. The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). This collection consists of pruned EfficientNet models. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('efficientnet_b1_pruned', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `efficientnet_b1_pruned`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? 
You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('efficientnet_b1_pruned', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation ```BibTeX @misc{tan2020efficientnet, title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}, author={Mingxing Tan and Quoc V. Le}, year={2020}, eprint={1905.11946}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` ``` @misc{aflalo2020knapsack, title={Knapsack Pruning with Inner Distillation}, author={Yonathan Aflalo and Asaf Noy and Ming Lin and Itamar Friedman and Lihi Zelnik}, year={2020}, eprint={2002.08258}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` <!-- Type: model-index Collections: - Name: EfficientNet Pruned Paper: Title: Knapsack Pruning with Inner Distillation URL: https://paperswithcode.com/paper/knapsack-pruning-with-inner-distillation Models: - Name: efficientnet_b1_pruned In Collection: EfficientNet Pruned Metadata: FLOPs: 489653114 Parameters: 6330000 File Size: 25595162 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b1_pruned Crop Pct: '0.882' Image Size: '240' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1208 Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb1_pruned_9ebb3fe6.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.25% Top 5 Accuracy: 93.84% - Name: efficientnet_b2_pruned In Collection: EfficientNet Pruned Metadata: FLOPs: 878133915 Parameters: 8310000 File Size: 33555005 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b2_pruned Crop Pct: '0.89' Image Size: '260' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1219 Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb2_pruned_203f55bc.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.91% Top 5 Accuracy: 94.86% - Name: efficientnet_b3_pruned In Collection: EfficientNet Pruned Metadata: FLOPs: 1239590641 Parameters: 9860000 File Size: 39770812 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b3_pruned Crop Pct: '0.904' Image Size: '300' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1230 Weights: 
https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb3_pruned_5abcc29f.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.86% Top 5 Accuracy: 95.24% -->
pytorch-image-models/hfdocs/source/models/efficientnet-pruned.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/efficientnet-pruned.mdx", "repo_id": "pytorch-image-models", "token_count": 2777 }
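The finetuning note in the card above stops at swapping the classifier head. The sketch below fills in a minimal training loop; the dataset is a random stand-in and the hyperparameters are illustrative, not a recommended recipe.

```py
import timm
import torch
from torch.utils.data import DataLoader, TensorDataset

# Random 10-class data standing in for a real dataset (EfficientNet-B1 expects 240x240 inputs).
data = TensorDataset(torch.randn(32, 3, 240, 240), torch.randint(0, 10, (32,)))
loader = DataLoader(data, batch_size=8)

model = timm.create_model("efficientnet_b1_pruned", pretrained=True, num_classes=10)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
criterion = torch.nn.CrossEntropyLoss()

model.train()
for images, labels in loader:
    optimizer.zero_grad()
    loss = criterion(model(images), labels)
    loss.backward()
    optimizer.step()
```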
# ResNet

**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) on top of each other to form networks: e.g. a ResNet-50 has fifty layers using these blocks.

## How do I use this model on an image?

To load a pretrained model:

```py
>>> import timm
>>> model = timm.create_model('resnet18', pretrained=True)
>>> model.eval()
```

To load and preprocess the image:

```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform

>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)

>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```

To get the model predictions:

```py
>>> import torch
>>> with torch.no_grad():
...     out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```

To get the top-5 predictions class names:

```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)

>>> with open("imagenet_classes.txt", "r") as f:
...     categories = [s.strip() for s in f.readlines()]

>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
...     print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```

Replace the model name with the variant you want to use, e.g. `resnet18`. You can find the IDs in the model summaries at the top of this page.

To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.

## How do I finetune this model?

You can finetune any of the pre-trained models just by changing the classifier (the last layer).

```py
>>> model = timm.create_model('resnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```

To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.

## How do I train this model?

You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation ```BibTeX @article{DBLP:journals/corr/HeZRS15, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Deep Residual Learning for Image Recognition}, journal = {CoRR}, volume = {abs/1512.03385}, year = {2015}, url = {http://arxiv.org/abs/1512.03385}, archivePrefix = {arXiv}, eprint = {1512.03385}, timestamp = {Wed, 17 Apr 2019 17:23:45 +0200}, biburl = {https://dblp.org/rec/journals/corr/HeZRS15.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: ResNet Paper: Title: Deep Residual Learning for Image Recognition URL: https://paperswithcode.com/paper/deep-residual-learning-for-image-recognition Models: - Name: resnet18 In Collection: ResNet Metadata: FLOPs: 2337073152 Parameters: 11690000 File Size: 46827520 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnet18 Crop Pct: '0.875' Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L641 Weights: https://download.pytorch.org/models/resnet18-5c106cde.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 69.74% Top 5 Accuracy: 89.09% - Name: resnet26 In Collection: ResNet Metadata: FLOPs: 3026804736 Parameters: 16000000 File Size: 64129972 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnet26 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L675 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.29% Top 5 Accuracy: 92.57% - Name: resnet34 In Collection: ResNet Metadata: FLOPs: 4718469120 Parameters: 21800000 File Size: 87290831 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnet34 Crop Pct: '0.875' Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L658 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.11% Top 5 Accuracy: 92.28% - Name: resnet50 In Collection: ResNet Metadata: FLOPs: 5282531328 Parameters: 25560000 File Size: 102488165 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnet50 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: 
https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L691 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50_ram-a26f946b.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.04% Top 5 Accuracy: 94.39% - Name: resnetblur50 In Collection: ResNet Metadata: FLOPs: 6621606912 Parameters: 25560000 File Size: 102488165 Architecture: - 1x1 Convolution - Batch Normalization - Blur Pooling - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnetblur50 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L1160 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.29% Top 5 Accuracy: 94.64% - Name: tv_resnet101 In Collection: ResNet Metadata: FLOPs: 10068547584 Parameters: 44550000 File Size: 178728960 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: tv_resnet101 LR: 0.1 Epochs: 90 Crop Pct: '0.875' LR Gamma: 0.1 Momentum: 0.9 Batch Size: 32 Image Size: '224' LR Step Size: 30 Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L761 Weights: https://download.pytorch.org/models/resnet101-5d3b4d8f.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.37% Top 5 Accuracy: 93.56% - Name: tv_resnet152 In Collection: ResNet Metadata: FLOPs: 14857660416 Parameters: 60190000 File Size: 241530880 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: tv_resnet152 LR: 0.1 Epochs: 90 Crop Pct: '0.875' LR Gamma: 0.1 Momentum: 0.9 Batch Size: 32 Image Size: '224' LR Step Size: 30 Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L769 Weights: https://download.pytorch.org/models/resnet152-b121ed2d.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.32% Top 5 Accuracy: 94.05% - Name: tv_resnet34 In Collection: ResNet Metadata: FLOPs: 4718469120 Parameters: 21800000 File Size: 87306240 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: tv_resnet34 LR: 0.1 Epochs: 90 Crop Pct: '0.875' LR Gamma: 0.1 Momentum: 0.9 Batch Size: 32 Image Size: '224' LR Step Size: 30 Weight Decay: 0.0001 Interpolation: bilinear 
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L745 Weights: https://download.pytorch.org/models/resnet34-333f7ec4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 73.3% Top 5 Accuracy: 91.42% - Name: tv_resnet50 In Collection: ResNet Metadata: FLOPs: 5282531328 Parameters: 25560000 File Size: 102502400 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: tv_resnet50 LR: 0.1 Epochs: 90 Crop Pct: '0.875' LR Gamma: 0.1 Momentum: 0.9 Batch Size: 32 Image Size: '224' LR Step Size: 30 Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L753 Weights: https://download.pytorch.org/models/resnet50-19c8e357.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.16% Top 5 Accuracy: 92.88% -->
pytorch-image-models/hfdocs/source/models/resnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/resnet.mdx", "repo_id": "pytorch-image-models", "token_count": 5076 }
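The card above points to the feature-extraction docs without an inline example; a short sketch of the `features_only` interface (output shapes depend on the input resolution):

```py
import timm
import torch

# features_only returns a backbone that outputs one feature map per stage.
model = timm.create_model("resnet18", pretrained=True, features_only=True)
model.eval()

with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))

print(model.feature_info.channels())  # channels per stage, e.g. [64, 64, 128, 256, 512]
print([f.shape for f in feats])       # spatial sizes shrink stage by stage
```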
# (Tensorflow) MixNet **MixNet** is a type of convolutional neural network discovered via AutoML that utilises [MixConvs](https://paperswithcode.com/method/mixconv) instead of regular [depthwise convolutions](https://paperswithcode.com/method/depthwise-convolution). The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('tf_mixnet_l', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `tf_mixnet_l`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('tf_mixnet_l', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation ```BibTeX @misc{tan2019mixconv, title={MixConv: Mixed Depthwise Convolutional Kernels}, author={Mingxing Tan and Quoc V. 
Le}, year={2019}, eprint={1907.09595}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: TF MixNet Paper: Title: 'MixConv: Mixed Depthwise Convolutional Kernels' URL: https://paperswithcode.com/paper/mixnet-mixed-depthwise-convolutional-kernels Models: - Name: tf_mixnet_l In Collection: TF MixNet Metadata: FLOPs: 688674516 Parameters: 7330000 File Size: 29620756 Architecture: - Batch Normalization - Dense Connections - Dropout - Global Average Pooling - Grouped Convolution - MixConv - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - MNAS Training Data: - ImageNet ID: tf_mixnet_l Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1720 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.78% Top 5 Accuracy: 94.0% - Name: tf_mixnet_m In Collection: TF MixNet Metadata: FLOPs: 416633502 Parameters: 5010000 File Size: 20310871 Architecture: - Batch Normalization - Dense Connections - Dropout - Global Average Pooling - Grouped Convolution - MixConv - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - MNAS Training Data: - ImageNet ID: tf_mixnet_m Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1709 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.96% Top 5 Accuracy: 93.16% - Name: tf_mixnet_s In Collection: TF MixNet Metadata: FLOPs: 302587678 Parameters: 4130000 File Size: 16738218 Architecture: - Batch Normalization - Dense Connections - Dropout - Global Average Pooling - Grouped Convolution - MixConv - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - MNAS Training Data: - ImageNet ID: tf_mixnet_s Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1698 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.68% Top 5 Accuracy: 92.64% -->
pytorch-image-models/hfdocs/source/models/tf-mixnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/tf-mixnet.mdx", "repo_id": "pytorch-image-models", "token_count": 2361 }
[build-system] requires = ["pdm-backend"] build-backend = "pdm.backend" [project] name = "timm" authors = [ {name = "Ross Wightman", email = "[email protected]"}, ] description = "PyTorch Image Models" readme = "README.md" requires-python = ">=3.8" keywords = ["pytorch", "image-classification"] license = {text = "Apache-2.0"} classifiers = [ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', ] dependencies = [ 'torch', 'torchvision', 'pyyaml', 'huggingface_hub', 'safetensors', ] dynamic = ["version"] [project.urls] homepage = "https://github.com/huggingface/pytorch-image-models" documentation = "https://huggingface.co/docs/timm/en/index" repository = "https://github.com/huggingface/pytorch-image-models" [tool.pdm.dev-dependencies] test = [ 'pytest', 'pytest-timeout', 'pytest-xdist', 'pytest-forked', 'expecttest', ] [tool.pdm.version] source = "file" path = "timm/version.py" [tool.pytest.ini_options] testpaths = ['tests'] markers = [ "base: marker for model tests using the basic setup", "cfg: marker for model tests checking the config", "torchscript: marker for model tests using torchscript", "features: marker for model tests checking feature extraction", "fxforward: marker for model tests using torch fx (only forward)", "fxbackward: marker for model tests using torch fx (only backward)", ]
pytorch-image-models/pyproject.toml/0
{ "file_path": "pytorch-image-models/pyproject.toml", "repo_id": "pytorch-image-models", "token_count": 800 }
""" Optimzier Tests These tests were adapted from PyTorch' optimizer tests. """ import functools import importlib import os from copy import deepcopy import pytest import torch from torch.nn import Parameter from torch.testing._internal.common_utils import TestCase from timm.optim import create_optimizer_v2, list_optimizers, get_optimizer_class, get_optimizer_info, OptimInfo from timm.optim import param_groups_layer_decay, param_groups_weight_decay from timm.scheduler import PlateauLRScheduler torch_backend = os.environ.get('TORCH_BACKEND') if torch_backend is not None: importlib.import_module(torch_backend) torch_device = os.environ.get('TORCH_DEVICE', 'cuda') # HACK relying on internal PyTorch test functionality for comparisons that I don't want to write torch_tc = TestCase() def _test_basic_cases_template(weight, bias, input, constructor, scheduler_constructors): weight = Parameter(weight) bias = Parameter(bias) input = Parameter(input) optimizer = constructor(weight, bias) schedulers = [] for scheduler_constructor in scheduler_constructors: schedulers.append(scheduler_constructor(optimizer)) # to check if the optimizer can be printed as a string optimizer.__repr__() def fn(): optimizer.zero_grad() y = weight.mv(input) if y.is_cuda and bias.is_cuda and y.get_device() != bias.get_device(): y = y.cuda(bias.get_device()) loss = (y + bias).pow(2).sum() loss.backward() return loss initial_value = fn().item() for _i in range(200): for scheduler in schedulers: if isinstance(scheduler, PlateauLRScheduler): val_loss = fn() scheduler.step(val_loss) else: scheduler.step() optimizer.step(fn) assert fn().item() < initial_value def _test_state_dict(weight, bias, input, constructor): weight = Parameter(weight) bias = Parameter(bias) input = Parameter(input) def fn_base(optimizer, weight, bias): optimizer.zero_grad() i = input_device if weight.device.type != 'cpu' else input loss = (weight.mv(i) + bias).pow(2).sum() loss.backward() return loss optimizer = constructor(weight, bias) fn = functools.partial(fn_base, optimizer, weight, bias) # Prime the optimizer for _i in range(20): optimizer.step(fn) # Clone the weights and construct new optimizer for them with torch.no_grad(): weight_c = Parameter(weight.clone().detach()) bias_c = Parameter(bias.clone().detach()) optimizer_c = constructor(weight_c, bias_c) fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c) # Load state dict state_dict = deepcopy(optimizer.state_dict()) state_dict_c = deepcopy(optimizer.state_dict()) optimizer_c.load_state_dict(state_dict_c) # Run both optimizations in parallel for _i in range(20): optimizer.step(fn) optimizer_c.step(fn_c) torch_tc.assertEqual(weight, weight_c) torch_tc.assertEqual(bias, bias_c) # Make sure state dict is deterministic with equal but not identical parameters torch_tc.assertEqual(optimizer.state_dict(), optimizer_c.state_dict()) # Make sure repeated parameters have identical representation in state dict optimizer_c.param_groups.extend(optimizer_c.param_groups) torch_tc.assertEqual(optimizer.state_dict()['param_groups'][-1], optimizer_c.state_dict()['param_groups'][-1]) # Check that state dict can be loaded even when we cast parameters # to a different type and move to a different device. 
if torch_device == 'cpu': return elif torch_device == 'cuda' and not torch.cuda.is_available(): return with torch.no_grad(): input_device = Parameter(input.clone().detach().float().to(torch_device)) weight_device = Parameter(weight.clone().detach().to(torch_device)) bias_device = Parameter(bias.clone().detach().to(torch_device)) optimizer_device = constructor(weight_device, bias_device) fn_device = functools.partial(fn_base, optimizer_device, weight_device, bias_device) state_dict = deepcopy(optimizer.state_dict()) state_dict_c = deepcopy(optimizer.state_dict()) optimizer_device.load_state_dict(state_dict_c) # Make sure state dict wasn't modified torch_tc.assertEqual(state_dict, state_dict_c) for _i in range(20): optimizer.step(fn) optimizer_device.step(fn_device) torch_tc.assertEqual(weight, weight_device) torch_tc.assertEqual(bias, bias_device) # validate deepcopy() copies all public attributes def getPublicAttr(obj): return set(k for k in obj.__dict__ if not k.startswith('_')) assert getPublicAttr(optimizer) == getPublicAttr(deepcopy(optimizer)) def _test_basic_cases(constructor, scheduler_constructors=None): if scheduler_constructors is None: scheduler_constructors = [] _test_state_dict( torch.randn(10, 5), torch.randn(10), torch.randn(5), constructor ) _test_basic_cases_template( torch.randn(10, 5), torch.randn(10), torch.randn(5), constructor, scheduler_constructors ) # non-contiguous parameters _test_basic_cases_template( torch.randn(10, 5, 2)[..., 0], torch.randn(10, 2)[..., 0], torch.randn(5), constructor, scheduler_constructors ) # CUDA if torch_device == 'cpu': return elif torch_device == 'cuda' and not torch.cuda.is_available(): return _test_basic_cases_template( torch.randn(10, 5).to(torch_device), torch.randn(10).to(torch_device), torch.randn(5).to(torch_device), constructor, scheduler_constructors ) def _test_model(optimizer, params, device=torch.device('cpu'), after_step=0): weight = torch.tensor( [[-0.2109, -0.4976], [-0.1413, -0.3420], [-0.2524, 0.6976]], device=device, requires_grad=True) bias = torch.tensor([-0.1085, -0.2979, 0.6892], device=device, requires_grad=True) weight2 = torch.tensor([[-0.0508, -0.3941, -0.2843]], device=device, requires_grad=True) bias2 = torch.tensor([-0.0711], device=device, requires_grad=True) input = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], device=device).reshape(3, 2) model = torch.nn.Sequential(torch.nn.Linear(2, 3), torch.nn.Sigmoid(), torch.nn.Linear(3, 1), torch.nn.Sigmoid()) model.to(device) pretrained_dict = model.state_dict() pretrained_dict['0.weight'] = weight pretrained_dict['0.bias'] = bias pretrained_dict['2.weight'] = weight2 pretrained_dict['2.bias'] = bias2 model.load_state_dict(pretrained_dict) optimizer = create_optimizer_v2(model, opt=optimizer, **params) prev_loss = float('inf') for i in range(20): optimizer.zero_grad() output = model(input) loss = output.sum() loss.backward() loss = loss.item() if i > after_step: assert loss < prev_loss prev_loss = loss optimizer.step() def rosenbrock(tensor): x, y = tensor return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2 def drosenbrock(tensor): x, y = tensor return torch.tensor((-400 * x * (y - x ** 2) - 2 * (1 - x), 200 * (y - x ** 2))) def _test_rosenbrock(constructor, scheduler_constructors=None): if scheduler_constructors is None: scheduler_constructors = [] params_t = torch.tensor([1.5, 1.5]) params = Parameter(params_t) optimizer = constructor([params]) schedulers = [] for scheduler_constructor in scheduler_constructors: schedulers.append(scheduler_constructor(optimizer)) 
solution = torch.tensor([1, 1]) initial_dist = params.clone().detach().dist(solution) def get_grad(_param, _sparse_grad, _w): grad = drosenbrock(params.clone().detach()) # Depending on w, provide only the x or y gradient if _sparse_grad: if _w: i = torch.tensor([[0, 0]], dtype=torch.int64) x = grad[0] v = torch.tensor([x / 4.0, x - x / 4.0]) else: i = torch.tensor([[1, 1]], dtype=torch.int64) y = grad[1] v = torch.tensor([y - y / 4.0, y / 4.0]) grad_out = torch.sparse_coo_tensor(i, v, (2,), dtype=v.dtype) else: if _w: grad_out = torch.tensor([grad[0], 0], dtype=_param.dtype) else: grad_out = torch.tensor([0, grad[1]], dtype=_param.dtype) return grad_out def eval(_param, _sparse_grad, _w): # Depending on w, provide only the x or y gradient optimizer.zero_grad() loss = rosenbrock(_param) loss.backward() grad_out = get_grad(_param, _sparse_grad, _w) with torch.no_grad(): _param.grad = grad_out.to_dense() return loss for i in range(2000): # Do cyclic coordinate descent w = i % 2 optimizer.step(functools.partial(eval, params, True, w)) for scheduler in schedulers: if isinstance(scheduler, PlateauLRScheduler): scheduler.step(rosenbrock(params)) else: scheduler.step() torch_tc.assertLessEqual(params.clone().detach().dist(solution), initial_dist) def _build_params_dict(weight, bias, **kwargs): return [{'params': [weight]}, dict(params=[bias], **kwargs)] def _build_params_dict_single(weight, bias, **kwargs): return [dict(params=bias, **kwargs)] @pytest.mark.parametrize('optimizer', list_optimizers(exclude_filters=('fused*', 'bnb*', 'kron*'))) def test_optim_factory(optimizer): assert issubclass(get_optimizer_class(optimizer, bind_defaults=False), torch.optim.Optimizer) opt_info = get_optimizer_info(optimizer) assert isinstance(opt_info, OptimInfo) lr = (1e-2,) * 4 if optimizer in ('mars', 'nadam', 'claprop', 'crmsproptf', 'cadafactorbv', 'csgdw', 'clamb'): lr = (1e-3,) * 4 elif optimizer in ('cmars',): lr = (1e-4,) * 4 try: if not opt_info.second_order: # basic tests don't support second order right now # test basic cases that don't need specific tuning via factory test _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=lr[0]) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=lr[1]), optimizer, lr=lr[1] / 10) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=lr[2]), optimizer, lr=lr[2] / 10) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=lr[3]), optimizer) ) except TypeError as e: if 'radamw' in optimizer: pytest.skip("Expected for 'radamw' (decoupled decay) to fail in older PyTorch versions.") else: raise e #@pytest.mark.parametrize('optimizer', ['sgd', 'momentum']) # FIXME momentum variant frequently fails in GitHub runner, but never local after many attempts @pytest.mark.parametrize('optimizer', ['sgd']) def test_sgd(optimizer): # _test_basic_cases( # lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3), # [lambda opt: StepLR(opt, gamma=0.9, step_size=10)] # ) # _test_basic_cases( # lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3), # [lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4, warmup_method="linear")] # ) # _test_basic_cases( # lambda weight, bias: optimizer([weight, bias], lr=1e-3), # [lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4, warmup_method="constant")] # ) # _test_basic_cases( # lambda weight, bias: 
optimizer([weight, bias], lr=1e-3), # [lambda opt: StepLR(opt, gamma=0.9, step_size=10), # lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4)] # ) # _test_basic_cases( # lambda weight, bias: optimizer([weight, bias], lr=1e-3), # [lambda opt: StepLR(opt, gamma=0.9, step_size=10), # lambda opt: ReduceLROnPlateau(opt)] # ) # _test_basic_cases( # lambda weight, bias: optimizer([weight, bias], lr=1e-3), # [lambda opt: StepLR(opt, gamma=0.99, step_size=10), # lambda opt: ExponentialLR(opt, gamma=0.99), # lambda opt: ReduceLROnPlateau(opt)] # ) _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=3e-3, momentum=1) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=3e-3, momentum=1, weight_decay=.1) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['adamw', 'adam', 'nadam', 'adamax', 'nadamw']) def test_adam(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) ) _test_model(optimizer, dict(lr=5e-2)) @pytest.mark.parametrize('optimizer', ['kron']) def test_kron(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['adopt', 'adoptw']) def test_adopt(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=3e-3) ) _test_model(optimizer, dict(lr=5e-2), after_step=1) # note no convergence in first step for ADOPT @pytest.mark.parametrize('optimizer', ['adan', 'adanw']) def test_adan(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=5e-2), after_step=1) # note no convergence in first step for ADOPT @pytest.mark.parametrize('optimizer', ['adabelief']) def test_adabelief(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) ) _test_model(optimizer, dict(lr=5e-2)) @pytest.mark.parametrize('optimizer', ['radam', 'radabelief']) def test_rectified(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['adadelta', 'adagrad']) def test_adaother(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-1) ) _test_model(optimizer, dict(lr=5e-2)) @pytest.mark.parametrize('optimizer', ['adafactor', 'adafactorbv']) def test_adafactor(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) ) _test_model(optimizer, dict(lr=5e-2)) @pytest.mark.parametrize('optimizer', ['lamb', 'lambc']) def test_lamb(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['laprop']) def test_laprop(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-2) ) _test_model(optimizer, dict(lr=1e-2)) @pytest.mark.parametrize('optimizer', ['lars', 'larc', 'nlars', 'nlarc']) def 
test_lars(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['madgrad', 'madgradw']) def test_madgrad(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-2) ) _test_model(optimizer, dict(lr=1e-2)) @pytest.mark.parametrize('optimizer', ['mars']) def test_mars(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=5e-2), after_step=1) # note no convergence in first step for ADOPT @pytest.mark.parametrize('optimizer', ['novograd']) def test_novograd(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['rmsprop', 'rmsproptf']) def test_rmsprop(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-2) ) _test_model(optimizer, dict(lr=1e-2)) @pytest.mark.parametrize('optimizer', ['adamp']) def test_adamp(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) ) _test_model(optimizer, dict(lr=5e-2)) @pytest.mark.parametrize('optimizer', ['sgdp']) def test_sgdp(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['lookahead_sgd', 'lookahead_momentum']) def test_lookahead_sgd(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) @pytest.mark.parametrize('optimizer', ['lookahead_adamw', 'lookahead_adam']) def test_lookahead_adam(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) ) @pytest.mark.parametrize('optimizer', ['lookahead_radam']) def test_lookahead_radam(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-4) ) def test_param_groups_layer_decay_with_end_decay(): model = torch.nn.Sequential( torch.nn.Linear(10, 5), torch.nn.ReLU(), torch.nn.Linear(5, 2) ) param_groups = param_groups_layer_decay( model, weight_decay=0.05, layer_decay=0.75, end_layer_decay=0.5, verbose=True ) assert len(param_groups) > 0 # Verify layer scaling is applied with end decay for group in param_groups: assert 'lr_scale' in group assert group['lr_scale'] <= 1.0 assert group['lr_scale'] >= 0.5 def test_param_groups_layer_decay_with_matcher(): class ModelWithMatcher(torch.nn.Module): def __init__(self): super().__init__() self.layer1 = torch.nn.Linear(10, 5) self.layer2 = torch.nn.Linear(5, 2) def group_matcher(self, coarse=False): return lambda name: int(name.split('.')[0][-1]) model = ModelWithMatcher() param_groups = param_groups_layer_decay( model, weight_decay=0.05, layer_decay=0.75, verbose=True ) assert len(param_groups) > 0 # Verify layer scaling is applied for group in param_groups: assert 'lr_scale' in group assert 'weight_decay' in group assert len(group['params']) > 0 def test_param_groups_weight_decay(): model = torch.nn.Sequential( torch.nn.Linear(10, 5), torch.nn.ReLU(), torch.nn.Linear(5, 2) ) weight_decay = 0.01 no_weight_decay_list = ['1.weight'] param_groups = param_groups_weight_decay( model, weight_decay=weight_decay, no_weight_decay_list=no_weight_decay_list ) assert len(param_groups) == 2 assert param_groups[0]['weight_decay'] == 0.0 assert param_groups[1]['weight_decay'] == weight_decay # Verify parameters are correctly grouped no_decay_params = 
set(param_groups[0]['params']) decay_params = set(param_groups[1]['params']) for name, param in model.named_parameters(): if param.ndim <= 1 or name.endswith(".bias") or name in no_weight_decay_list: assert param in no_decay_params else: assert param in decay_params
pytorch-image-models/tests/test_optim.py/0
{ "file_path": "pytorch-image-models/tests/test_optim.py", "repo_id": "pytorch-image-models", "token_count": 9446 }
import csv import os import pkgutil import re from typing import Dict, List, Optional, Union from .dataset_info import DatasetInfo # NOTE no ambiguity wrt to mapping from # classes to ImageNet subset so far, but likely to change _NUM_CLASSES_TO_SUBSET = { 1000: 'imagenet-1k', 11221: 'imagenet-21k-miil', # miil subset of fall11 11821: 'imagenet-12k', # timm specific 12k subset of fall11 21841: 'imagenet-22k', # as in fall11.tar 21842: 'imagenet-22k-ms', # a Microsoft (for FocalNet) remapping of 22k w/ moves ImageNet-1k classes to first 1000 21843: 'imagenet-21k-goog', # Google's ImageNet full has two classes not in fall11 } _SUBSETS = { 'imagenet1k': 'imagenet_synsets.txt', 'imagenet12k': 'imagenet12k_synsets.txt', 'imagenet22k': 'imagenet22k_synsets.txt', 'imagenet21k': 'imagenet21k_goog_synsets.txt', 'imagenet21kgoog': 'imagenet21k_goog_synsets.txt', 'imagenet21kmiil': 'imagenet21k_miil_synsets.txt', 'imagenet22kms': 'imagenet22k_ms_synsets.txt', } _LEMMA_FILE = 'imagenet_synset_to_lemma.txt' _DEFINITION_FILE = 'imagenet_synset_to_definition.txt' def infer_imagenet_subset(model_or_cfg) -> Optional[str]: if isinstance(model_or_cfg, dict): num_classes = model_or_cfg.get('num_classes', None) else: num_classes = getattr(model_or_cfg, 'num_classes', None) if not num_classes: pretrained_cfg = getattr(model_or_cfg, 'pretrained_cfg', {}) # FIXME at some point pretrained_cfg should include dataset-tag, # which will be more robust than a guess based on num_classes num_classes = pretrained_cfg.get('num_classes', None) if not num_classes or num_classes not in _NUM_CLASSES_TO_SUBSET: return None return _NUM_CLASSES_TO_SUBSET[num_classes] class ImageNetInfo(DatasetInfo): def __init__(self, subset: str = 'imagenet-1k'): super().__init__() subset = re.sub(r'[-_\s]', '', subset.lower()) assert subset in _SUBSETS, f'Unknown imagenet subset {subset}.' # WordNet synsets (part-of-speech + offset) are the unique class label names for ImageNet classifiers synset_file = _SUBSETS[subset] synset_data = pkgutil.get_data(__name__, os.path.join('_info', synset_file)) self._synsets = synset_data.decode('utf-8').splitlines() # WordNet lemmas (canonical dictionary form of word) and definitions are used to build # the class descriptions. If detailed=True both are used, otherwise just the lemmas. lemma_data = pkgutil.get_data(__name__, os.path.join('_info', _LEMMA_FILE)) reader = csv.reader(lemma_data.decode('utf-8').splitlines(), delimiter='\t') self._lemmas = dict(reader) definition_data = pkgutil.get_data(__name__, os.path.join('_info', _DEFINITION_FILE)) reader = csv.reader(definition_data.decode('utf-8').splitlines(), delimiter='\t') self._definitions = dict(reader) def num_classes(self): return len(self._synsets) def label_names(self): return self._synsets def label_descriptions(self, detailed: bool = False, as_dict: bool = False) -> Union[List[str], Dict[str, str]]: if as_dict: return {label: self.label_name_to_description(label, detailed=detailed) for label in self._synsets} else: return [self.label_name_to_description(label, detailed=detailed) for label in self._synsets] def index_to_label_name(self, index) -> str: assert 0 <= index < len(self._synsets), \ f'Index ({index}) out of range for dataset with {len(self._synsets)} classes.' 
return self._synsets[index] def index_to_description(self, index: int, detailed: bool = False) -> str: label = self.index_to_label_name(index) return self.label_name_to_description(label, detailed=detailed) def label_name_to_description(self, label: str, detailed: bool = False) -> str: if detailed: description = f'{self._lemmas[label]}: {self._definitions[label]}' else: description = f'{self._lemmas[label]}' return description
pytorch-image-models/timm/data/imagenet_info.py/0
{ "file_path": "pytorch-image-models/timm/data/imagenet_info.py", "repo_id": "pytorch-image-models", "token_count": 1732 }
from multiprocessing import Value class SharedCount: def __init__(self, epoch: int = 0): self.shared_epoch = Value('i', epoch) @property def value(self): return self.shared_epoch.value @value.setter def value(self, epoch): self.shared_epoch.value = epoch
pytorch-image-models/timm/data/readers/shared_count.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/shared_count.py", "repo_id": "pytorch-image-models", "token_count": 122 }
""" PyTorch Conditionally Parameterized Convolution (CondConv) Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference (https://arxiv.org/abs/1904.04971) Hacked together by / Copyright 2020 Ross Wightman """ import math from functools import partial import numpy as np import torch from torch import nn as nn from torch.nn import functional as F from .helpers import to_2tuple from .conv2d_same import conv2d_same from .padding import get_padding_value def get_condconv_initializer(initializer, num_experts, expert_shape): def condconv_initializer(weight): """CondConv initializer function.""" num_params = np.prod(expert_shape) if (len(weight.shape) != 2 or weight.shape[0] != num_experts or weight.shape[1] != num_params): raise (ValueError( 'CondConv variables must have shape [num_experts, num_params]')) for i in range(num_experts): initializer(weight[i].view(expert_shape)) return condconv_initializer class CondConv2d(nn.Module): """ Conditionally Parameterized Convolution Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion: https://github.com/pytorch/pytorch/issues/17983 """ __constants__ = ['in_channels', 'out_channels', 'dynamic_padding'] def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): super(CondConv2d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = to_2tuple(kernel_size) self.stride = to_2tuple(stride) padding_val, is_padding_dynamic = get_padding_value( padding, kernel_size, stride=stride, dilation=dilation) self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript self.padding = to_2tuple(padding_val) self.dilation = to_2tuple(dilation) self.groups = groups self.num_experts = num_experts self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size weight_num_param = 1 for wd in self.weight_shape: weight_num_param *= wd self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) if bias: self.bias_shape = (self.out_channels,) self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): init_weight = get_condconv_initializer( partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) init_weight(self.weight) if self.bias is not None: fan_in = np.prod(self.weight_shape[1:]) bound = 1 / math.sqrt(fan_in) init_bias = get_condconv_initializer( partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) init_bias(self.bias) def forward(self, x, routing_weights): B, C, H, W = x.shape weight = torch.matmul(routing_weights, self.weight) new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size weight = weight.view(new_weight_shape) bias = None if self.bias is not None: bias = torch.matmul(routing_weights, self.bias) bias = bias.view(B * self.out_channels) # move batch elements with channels so each batch element can be efficiently convolved with separate kernel # reshape instead of view to work with channels_last input x = x.reshape(1, B * C, H, W) if self.dynamic_padding: out = conv2d_same( x, weight, bias, stride=self.stride, padding=self.padding, dilation=self.dilation, 
groups=self.groups * B) else: out = F.conv2d( x, weight, bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups * B) out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) # Literal port (from TF definition) # x = torch.split(x, 1, 0) # weight = torch.split(weight, 1, 0) # if self.bias is not None: # bias = torch.matmul(routing_weights, self.bias) # bias = torch.split(bias, 1, 0) # else: # bias = [None] * B # out = [] # for xi, wi, bi in zip(x, weight, bias): # wi = wi.view(*self.weight_shape) # if bi is not None: # bi = bi.view(*self.bias_shape) # out.append(self.conv_fn( # xi, wi, bi, stride=self.stride, padding=self.padding, # dilation=self.dilation, groups=self.groups)) # out = torch.cat(out, 0) return out
pytorch-image-models/timm/layers/cond_conv2d.py/0
{ "file_path": "pytorch-image-models/timm/layers/cond_conv2d.py", "repo_id": "pytorch-image-models", "token_count": 2314 }
""" Global Context Attention Block Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond` - https://arxiv.org/abs/1904.11492 Official code consulted as reference: https://github.com/xvjiarui/GCNet Hacked together by / Copyright 2021 Ross Wightman """ from torch import nn as nn import torch.nn.functional as F from .create_act import create_act_layer, get_act_layer from .helpers import make_divisible from .mlp import ConvMlp from .norm import LayerNorm2d class GlobalContext(nn.Module): def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False, rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'): super(GlobalContext, self).__init__() act_layer = get_act_layer(act_layer) self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None if rd_channels is None: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) if fuse_add: self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) else: self.mlp_add = None if fuse_scale: self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) else: self.mlp_scale = None self.gate = create_act_layer(gate_layer) self.init_last_zero = init_last_zero self.reset_parameters() def reset_parameters(self): if self.conv_attn is not None: nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu') if self.mlp_add is not None: nn.init.zeros_(self.mlp_add.fc2.weight) def forward(self, x): B, C, H, W = x.shape if self.conv_attn is not None: attn = self.conv_attn(x).reshape(B, 1, H * W) # (B, 1, H * W) attn = F.softmax(attn, dim=-1).unsqueeze(3) # (B, 1, H * W, 1) context = x.reshape(B, C, H * W).unsqueeze(1) @ attn context = context.view(B, C, 1, 1) else: context = x.mean(dim=(2, 3), keepdim=True) if self.mlp_scale is not None: mlp_x = self.mlp_scale(context) x = x * self.gate(mlp_x) if self.mlp_add is not None: mlp_x = self.mlp_add(context) x = x + mlp_x return x
pytorch-image-models/timm/layers/global_context.py/0
{ "file_path": "pytorch-image-models/timm/layers/global_context.py", "repo_id": "pytorch-image-models", "token_count": 1169 }
""" Normalization layers and wrappers Norm layer definitions that support fast norm and consistent channel arg order (always first arg). Hacked together by / Copyright 2022 Ross Wightman """ import numbers from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from .fast_norm import is_fast_norm, fast_group_norm, fast_layer_norm, fast_rms_norm, fast_simple_norm, simple_norm try: from torch.nn.functional import rms_norm except ImportError: from .fast_norm import rms_norm class GroupNorm(nn.GroupNorm): _fast_norm: torch.jit.Final[bool] def __init__(self, num_channels, num_groups=32, eps=1e-5, affine=True): # NOTE num_channels is swapped to first arg for consistency in swapping norm layers with BN super().__init__(num_groups, num_channels, eps=eps, affine=affine) self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x): if self._fast_norm: return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) class GroupNorm1(nn.GroupNorm): """ Group Normalization with 1 group. Input: tensor in shape [B, C, *] """ _fast_norm: torch.jit.Final[bool] def __init__(self, num_channels, **kwargs): super().__init__(1, num_channels, **kwargs) self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x: torch.Tensor) -> torch.Tensor: if self._fast_norm: return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) class LayerNorm(nn.LayerNorm): """ LayerNorm w/ fast norm option """ _fast_norm: torch.jit.Final[bool] def __init__(self, num_channels, eps=1e-6, affine=True): super().__init__(num_channels, eps=eps, elementwise_affine=affine) self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x: torch.Tensor) -> torch.Tensor: if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) return x class LayerNorm2d(nn.LayerNorm): """ LayerNorm for channels of '2D' spatial NCHW tensors """ _fast_norm: torch.jit.Final[bool] def __init__(self, num_channels, eps=1e-6, affine=True): super().__init__(num_channels, eps=eps, elementwise_affine=affine) self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x: torch.Tensor) -> torch.Tensor: x = x.permute(0, 2, 3, 1) if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) x = x.permute(0, 3, 1, 2) return x def _is_contiguous(tensor: torch.Tensor) -> bool: # jit is oh so lovely :/ if torch.jit.is_scripting(): return tensor.is_contiguous() else: return tensor.is_contiguous(memory_format=torch.contiguous_format) def _layer_norm_cf(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float): s, u = torch.var_mean(x, dim=1, unbiased=False, keepdim=True) x = (x - u) * torch.rsqrt(s + eps) x = x * weight[:, None, None] + bias[:, None, None] return x def _layer_norm_cf_sqm(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float): u = x.mean(dim=1, keepdim=True) s = ((x * x).mean(dim=1, keepdim=True) - (u * u)).clamp(0) x = (x - u) * torch.rsqrt(s + eps) x = x * weight.view(1, 
-1, 1, 1) + bias.view(1, -1, 1, 1) return x class LayerNormExp2d(nn.LayerNorm): """ LayerNorm for channels_first tensors with 2d spatial dimensions (ie N, C, H, W). Experimental implementation w/ manual norm for tensors non-contiguous tensors. This improves throughput in some scenarios (tested on Ampere GPU), esp w/ channels_last layout. However, benefits are not always clear and can perform worse on other GPUs. """ def __init__(self, num_channels, eps=1e-6): super().__init__(num_channels, eps=eps) def forward(self, x) -> torch.Tensor: if _is_contiguous(x): x = F.layer_norm( x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2) else: x = _layer_norm_cf(x, self.weight, self.bias, self.eps) return x class RmsNorm(nn.Module): """ RmsNorm w/ fast (apex) norm if available """ __constants__ = ['normalized_shape', 'eps', 'elementwise_affine', '_fast_norm'] normalized_shape: Tuple[int, ...] eps: float elementwise_affine: bool _fast_norm: bool def __init__(self, channels, eps=1e-6, affine=True, device=None, dtype=None) -> None: factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() normalized_shape = channels if isinstance(normalized_shape, numbers.Integral): # mypy error: incompatible types in assignment normalized_shape = (normalized_shape,) # type: ignore[assignment] self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] self.eps = eps self.elementwise_affine = affine self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) if self.elementwise_affine: self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: nn.init.ones_(self.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: # NOTE fast norm fallback needs our rms norm impl, so both paths through here. # Since there is no built-in PyTorch impl, always use APEX RmsNorm if is installed. if self._fast_norm: x = fast_rms_norm(x, self.normalized_shape, self.weight, self.eps) else: x = rms_norm(x, self.normalized_shape, self.weight, self.eps) return x class RmsNorm2d(nn.Module): """ RmsNorm w/ fast (apex) norm if available """ __constants__ = ['normalized_shape', 'eps', 'elementwise_affine', '_fast_norm'] normalized_shape: Tuple[int, ...] eps: float elementwise_affine: bool _fast_norm: bool def __init__(self, channels, eps=1e-6, affine=True, device=None, dtype=None) -> None: factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() normalized_shape = channels if isinstance(normalized_shape, numbers.Integral): # mypy error: incompatible types in assignment normalized_shape = (normalized_shape,) # type: ignore[assignment] self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] self.eps = eps self.elementwise_affine = affine self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) if self.elementwise_affine: self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: nn.init.ones_(self.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: x = x.permute(0, 2, 3, 1) # NOTE fast norm fallback needs our rms norm impl, so both paths through here. # Since there is no built-in PyTorch impl, always use APEX RmsNorm if is installed. 
if self._fast_norm: x = fast_rms_norm(x, self.normalized_shape, self.weight, self.eps) else: x = rms_norm(x, self.normalized_shape, self.weight, self.eps) x = x.permute(0, 3, 1, 2) return x class SimpleNorm(nn.Module): """ SimpleNorm (x / std(x)) """ __constants__ = ['normalized_shape', 'eps', 'elementwise_affine', '_fast_norm'] normalized_shape: Tuple[int, ...] eps: float elementwise_affine: bool _fast_norm: bool def __init__(self, channels, eps=1e-6, affine=True, device=None, dtype=None) -> None: factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() normalized_shape = channels if isinstance(normalized_shape, numbers.Integral): # mypy error: incompatible types in assignment normalized_shape = (normalized_shape,) # type: ignore[assignment] self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] self.eps = eps self.elementwise_affine = affine self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) if self.elementwise_affine: self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: nn.init.ones_(self.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: if self._fast_norm: x = fast_simple_norm(x, self.normalized_shape, self.weight, self.eps) else: x = simple_norm(x, self.normalized_shape, self.weight, self.eps) return x class SimpleNorm2d(nn.Module): """ SimpleNorm for NCHW tensors """ __constants__ = ['normalized_shape', 'eps', 'elementwise_affine', '_fast_norm'] normalized_shape: Tuple[int, ...] eps: float elementwise_affine: bool _fast_norm: bool def __init__(self, channels, eps=1e-6, affine=True, device=None, dtype=None) -> None: factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() normalized_shape = channels if isinstance(normalized_shape, numbers.Integral): # mypy error: incompatible types in assignment normalized_shape = (normalized_shape,) # type: ignore[assignment] self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] self.eps = eps self.elementwise_affine = affine self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) if self.elementwise_affine: self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: nn.init.ones_(self.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: x = x.permute(0, 2, 3, 1) if self._fast_norm: x = fast_simple_norm(x, self.normalized_shape, self.weight, self.eps) else: x = simple_norm(x, self.normalized_shape, self.weight, self.eps) x = x.permute(0, 3, 1, 2) return x
pytorch-image-models/timm/layers/norm.py/0
{ "file_path": "pytorch-image-models/timm/layers/norm.py", "repo_id": "pytorch-image-models", "token_count": 4880 }
""" Test Time Pooling (Average-Max Pool) Hacked together by / Copyright 2020 Ross Wightman """ import logging from torch import nn import torch.nn.functional as F from .adaptive_avgmax_pool import adaptive_avgmax_pool2d _logger = logging.getLogger(__name__) class TestTimePoolHead(nn.Module): def __init__(self, base, original_pool=7): super(TestTimePoolHead, self).__init__() self.base = base self.original_pool = original_pool base_fc = self.base.get_classifier() if isinstance(base_fc, nn.Conv2d): self.fc = base_fc else: self.fc = nn.Conv2d( self.base.num_features, self.base.num_classes, kernel_size=1, bias=True) self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size())) self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size())) self.base.reset_classifier(0) # delete original fc layer def forward(self, x): x = self.base.forward_features(x) x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1) x = self.fc(x) x = adaptive_avgmax_pool2d(x, 1) return x.view(x.size(0), -1) def apply_test_time_pool(model, config, use_test_size=False): test_time_pool = False if not hasattr(model, 'default_cfg') or not model.default_cfg: return model, False if use_test_size and 'test_input_size' in model.default_cfg: df_input_size = model.default_cfg['test_input_size'] else: df_input_size = model.default_cfg['input_size'] if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]: _logger.info('Target input size %s > pretrained default %s, using test time pooling' % (str(config['input_size'][-2:]), str(df_input_size[-2:]))) model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size']) test_time_pool = True return model, test_time_pool
pytorch-image-models/timm/layers/test_time_pool.py/0
{ "file_path": "pytorch-image-models/timm/layers/test_time_pool.py", "repo_id": "pytorch-image-models", "token_count": 881 }
""" Model creation / weight loading / state_dict helpers Hacked together by / Copyright 2020 Ross Wightman """ import logging import os from typing import Any, Callable, Dict, Optional, Union import torch try: import safetensors.torch _has_safetensors = True except ImportError: _has_safetensors = False _logger = logging.getLogger(__name__) __all__ = ['clean_state_dict', 'load_state_dict', 'load_checkpoint', 'remap_state_dict', 'resume_checkpoint'] def _remove_prefix(text, prefix): # FIXME replace with 3.9 stdlib fn when min at 3.9 if text.startswith(prefix): return text[len(prefix):] return text def clean_state_dict(state_dict: Dict[str, Any]) -> Dict[str, Any]: # 'clean' checkpoint by removing .module prefix from state dict if it exists from parallel training cleaned_state_dict = {} to_remove = ( 'module.', # DDP wrapper '_orig_mod.', # torchcompile dynamo wrapper ) for k, v in state_dict.items(): for r in to_remove: k = _remove_prefix(k, r) cleaned_state_dict[k] = v return cleaned_state_dict def load_state_dict( checkpoint_path: str, use_ema: bool = True, device: Union[str, torch.device] = 'cpu', weights_only: bool = False, ) -> Dict[str, Any]: if checkpoint_path and os.path.isfile(checkpoint_path): # Check if safetensors or not and load weights accordingly if str(checkpoint_path).endswith(".safetensors"): assert _has_safetensors, "`pip install safetensors` to use .safetensors" checkpoint = safetensors.torch.load_file(checkpoint_path, device=device) else: try: checkpoint = torch.load(checkpoint_path, map_location=device, weights_only=weights_only) except TypeError: checkpoint = torch.load(checkpoint_path, map_location=device) state_dict_key = '' if isinstance(checkpoint, dict): if use_ema and checkpoint.get('state_dict_ema', None) is not None: state_dict_key = 'state_dict_ema' elif use_ema and checkpoint.get('model_ema', None) is not None: state_dict_key = 'model_ema' elif 'state_dict' in checkpoint: state_dict_key = 'state_dict' elif 'model' in checkpoint: state_dict_key = 'model' state_dict = clean_state_dict(checkpoint[state_dict_key] if state_dict_key else checkpoint) _logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path)) return state_dict else: _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) raise FileNotFoundError() def load_checkpoint( model: torch.nn.Module, checkpoint_path: str, use_ema: bool = True, device: Union[str, torch.device] = 'cpu', strict: bool = True, remap: bool = False, filter_fn: Optional[Callable] = None, weights_only: bool = False, ): if os.path.splitext(checkpoint_path)[-1].lower() in ('.npz', '.npy'): # numpy checkpoint, try to load via model specific load_pretrained fn if hasattr(model, 'load_pretrained'): model.load_pretrained(checkpoint_path) else: raise NotImplementedError('Model cannot load numpy checkpoint') return state_dict = load_state_dict(checkpoint_path, use_ema, device=device, weights_only=weights_only) if remap: state_dict = remap_state_dict(state_dict, model) elif filter_fn: state_dict = filter_fn(state_dict, model) incompatible_keys = model.load_state_dict(state_dict, strict=strict) return incompatible_keys def remap_state_dict( state_dict: Dict[str, Any], model: torch.nn.Module, allow_reshape: bool = True ): """ remap checkpoint by iterating over state dicts in order (ignoring original keys). This assumes models (and originating state dict) were created with params registered in same order. 
""" out_dict = {} for (ka, va), (kb, vb) in zip(model.state_dict().items(), state_dict.items()): assert va.numel() == vb.numel(), f'Tensor size mismatch {ka}: {va.shape} vs {kb}: {vb.shape}. Remap failed.' if va.shape != vb.shape: if allow_reshape: vb = vb.reshape(va.shape) else: assert False, f'Tensor shape mismatch {ka}: {va.shape} vs {kb}: {vb.shape}. Remap failed.' out_dict[ka] = vb return out_dict def resume_checkpoint( model: torch.nn.Module, checkpoint_path: str, optimizer: torch.optim.Optimizer = None, loss_scaler: Any = None, log_info: bool = True, ): resume_epoch = None if os.path.isfile(checkpoint_path): checkpoint = torch.load(checkpoint_path, map_location='cpu', weights_only=False) if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: if log_info: _logger.info('Restoring model state from checkpoint...') state_dict = clean_state_dict(checkpoint['state_dict']) model.load_state_dict(state_dict) if optimizer is not None and 'optimizer' in checkpoint: if log_info: _logger.info('Restoring optimizer state from checkpoint...') optimizer.load_state_dict(checkpoint['optimizer']) if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint: if log_info: _logger.info('Restoring AMP loss scaler state from checkpoint...') loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key]) if 'epoch' in checkpoint: resume_epoch = checkpoint['epoch'] if 'version' in checkpoint and checkpoint['version'] > 1: resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save if log_info: _logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch'])) else: model.load_state_dict(checkpoint) if log_info: _logger.info("Loaded checkpoint '{}'".format(checkpoint_path)) return resume_epoch else: _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) raise FileNotFoundError()
pytorch-image-models/timm/models/_helpers.py/0
{ "file_path": "pytorch-image-models/timm/models/_helpers.py", "repo_id": "pytorch-image-models", "token_count": 2793 }
""" ConViT Model @article{d2021convit, title={ConViT: Improving Vision Transformers with Soft Convolutional Inductive Biases}, author={d'Ascoli, St{\'e}phane and Touvron, Hugo and Leavitt, Matthew and Morcos, Ari and Biroli, Giulio and Sagun, Levent}, journal={arXiv preprint arXiv:2103.10697}, year={2021} } Paper link: https://arxiv.org/abs/2103.10697 Original code: https://github.com/facebookresearch/convit, original copyright below Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman """ # Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the CC-by-NC license found in the # LICENSE file in the root directory of this source tree. # '''These modules are adapted from those of timm, see https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py ''' from typing import Optional import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, trunc_normal_, PatchEmbed, Mlp, LayerNorm, HybridEmbed from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs __all__ = ['ConVit'] @register_notrace_module # reason: FX can't symbolically trace control flow in forward method class GPSA(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., locality_strength=1., ): super().__init__() self.num_heads = num_heads self.dim = dim head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.locality_strength = locality_strength self.qk = nn.Linear(dim, dim * 2, bias=qkv_bias) self.v = nn.Linear(dim, dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.pos_proj = nn.Linear(3, num_heads) self.proj_drop = nn.Dropout(proj_drop) self.gating_param = nn.Parameter(torch.ones(self.num_heads)) self.rel_indices: torch.Tensor = torch.zeros(1, 1, 1, 3) # silly torchscript hack, won't work with None def forward(self, x): B, N, C = x.shape if self.rel_indices is None or self.rel_indices.shape[1] != N: self.rel_indices = self.get_rel_indices(N) attn = self.get_attention(x) v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x def get_attention(self, x): B, N, C = x.shape qk = self.qk(x).reshape(B, N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k = qk[0], qk[1] pos_score = self.rel_indices.expand(B, -1, -1, -1) pos_score = self.pos_proj(pos_score).permute(0, 3, 1, 2) patch_score = (q @ k.transpose(-2, -1)) * self.scale patch_score = patch_score.softmax(dim=-1) pos_score = pos_score.softmax(dim=-1) gating = self.gating_param.view(1, -1, 1, 1) attn = (1. 
- torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score attn /= attn.sum(dim=-1).unsqueeze(-1) attn = self.attn_drop(attn) return attn def get_attention_map(self, x, return_map=False): attn_map = self.get_attention(x).mean(0) # average over batch distances = self.rel_indices.squeeze()[:, :, -1] ** .5 dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / distances.size(0) if return_map: return dist, attn_map else: return dist def local_init(self): self.v.weight.data.copy_(torch.eye(self.dim)) locality_distance = 1 # max(1,1/locality_strength**.5) kernel_size = int(self.num_heads ** .5) center = (kernel_size - 1) / 2 if kernel_size % 2 == 0 else kernel_size // 2 for h1 in range(kernel_size): for h2 in range(kernel_size): position = h1 + kernel_size * h2 self.pos_proj.weight.data[position, 2] = -1 self.pos_proj.weight.data[position, 1] = 2 * (h1 - center) * locality_distance self.pos_proj.weight.data[position, 0] = 2 * (h2 - center) * locality_distance self.pos_proj.weight.data *= self.locality_strength def get_rel_indices(self, num_patches: int) -> torch.Tensor: img_size = int(num_patches ** .5) rel_indices = torch.zeros(1, num_patches, num_patches, 3) ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1) indx = ind.repeat(img_size, img_size) indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1) indd = indx ** 2 + indy ** 2 rel_indices[:, :, :, 2] = indd.unsqueeze(0) rel_indices[:, :, :, 1] = indy.unsqueeze(0) rel_indices[:, :, :, 0] = indx.unsqueeze(0) device = self.qk.weight.device return rel_indices.to(device) class MHSA(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., ): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def get_attention_map(self, x, return_map=False): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn_map = (q @ k.transpose(-2, -1)) * self.scale attn_map = attn_map.softmax(dim=-1).mean(0) img_size = int(N ** .5) ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1) indx = ind.repeat(img_size, img_size) indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1) indd = indx ** 2 + indy ** 2 distances = indd ** .5 distances = distances.to(x.device) dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / N if return_map: return dist, attn_map else: return dist def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=LayerNorm, use_gpsa=True, locality_strength=1., ): super().__init__() self.norm1 = norm_layer(dim) self.use_gpsa = use_gpsa if self.use_gpsa: self.attn = GPSA( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, locality_strength=locality_strength, ) else: self.attn = MHSA( dim, num_heads=num_heads, 
qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, ) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop, ) def forward(self, x): x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class ConVit(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__( self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='token', embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, drop_rate=0., pos_drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=LayerNorm, local_up_to_layer=3, locality_strength=1., use_pos_embed=True, ): super().__init__() assert global_pool in ('', 'avg', 'token') embed_dim *= num_heads self.num_classes = num_classes self.global_pool = global_pool self.local_up_to_layer = local_up_to_layer self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models self.locality_strength = locality_strength self.use_pos_embed = use_pos_embed if hybrid_backbone is not None: self.patch_embed = HybridEmbed( hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim) else: self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, ) num_patches = self.patch_embed.num_patches self.num_patches = num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.pos_drop = nn.Dropout(p=pos_drop_rate) if self.use_pos_embed: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) trunc_normal_(self.pos_embed, std=.02) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, use_gpsa=i < local_up_to_layer, locality_strength=locality_strength, ) for i in range(depth)]) self.norm = norm_layer(embed_dim) # Classifier head self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')] self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) for n, m in self.named_modules(): if hasattr(m, 'local_init'): m.local_init() def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^cls_token|pos_embed|patch_embed', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'token', 
'avg') self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.patch_embed(x) if self.use_pos_embed: x = x + self.pos_embed x = self.pos_drop(x) cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) for u, blk in enumerate(self.blocks): if u == self.local_up_to_layer: x = torch.cat((cls_tokens, x), dim=1) x = blk(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool: x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_convit(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') return build_model_with_cfg(ConVit, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ # ConViT 'convit_tiny.fb_in1k': _cfg(hf_hub_id='timm/'), 'convit_small.fb_in1k': _cfg(hf_hub_id='timm/'), 'convit_base.fb_in1k': _cfg(hf_hub_id='timm/') }) @register_model def convit_tiny(pretrained=False, **kwargs) -> ConVit: model_args = dict( local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=4) model = _create_convit(variant='convit_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convit_small(pretrained=False, **kwargs) -> ConVit: model_args = dict( local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=9) model = _create_convit(variant='convit_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convit_base(pretrained=False, **kwargs) -> ConVit: model_args = dict( local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=16) model = _create_convit(variant='convit_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model
pytorch-image-models/timm/models/convit.py/0
{ "file_path": "pytorch-image-models/timm/models/convit.py", "repo_id": "pytorch-image-models", "token_count": 7721 }
""" EVA EVA from https://github.com/baaivision/EVA , paper: https://arxiv.org/abs/2211.07636 @article{EVA, title={EVA: Exploring the Limits of Masked Visual Representation Learning at Scale}, author={Fang, Yuxin and Wang, Wen and Xie, Binhui and Sun, Quan and Wu, Ledell and Wang, Xinggang and Huang, Tiejun and Wang, Xinlong and Cao, Yue}, journal={arXiv preprint arXiv:2211.07636}, year={2022} } EVA-02: A Visual Representation for Neon Genesis - https://arxiv.org/abs/2303.11331 @article{EVA02, title={EVA-02: A Visual Representation for Neon Genesis}, author={Fang, Yuxin and Sun, Quan and Wang, Xinggang and Huang, Tiejun and Wang, Xinlong and Cao, Yue}, journal={arXiv preprint arXiv:2303.11331}, year={2023} } This file contains EVA & EVA02 model implementations evolved from BEiT, additional models in vision_transformer.py. Modifications by / Copyright 2023 Ross Wightman, original copyrights below """ # EVA models Copyright (c) 2022 BAAI-Vision # EVA02 models Copyright (c) 2023 BAAI-Vision import math from typing import Callable, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from timm.layers import PatchEmbed, Mlp, GluMlp, SwiGLU, LayerNorm, DropPath, PatchDropout, RotaryEmbeddingCat, \ apply_rot_embed_cat, apply_keep_indices_nlc, trunc_normal_, resample_patch_embed, resample_abs_pos_embed, \ to_2tuple, use_fused_attn from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint from ._registry import generate_default_cfgs, register_model __all__ = ['Eva'] class EvaAttention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__( self, dim: int, num_heads: int = 8, qkv_bias: bool = True, qkv_fused: bool = True, num_prefix_tokens: int = 1, qkv_bias_separate: bool = False, attn_drop: float = 0., proj_drop: float = 0., attn_head_dim: Optional[int] = None, norm_layer: Optional[Callable] = None, ): """ Args: dim: num_heads: qkv_bias: qkv_fused: attn_drop: proj_drop: attn_head_dim: norm_layer: """ super().__init__() self.num_heads = num_heads head_dim = dim // num_heads if attn_head_dim is not None: head_dim = attn_head_dim all_head_dim = head_dim * self.num_heads self.scale = head_dim ** -0.5 self.num_prefix_tokens = num_prefix_tokens self.fused_attn = use_fused_attn() self.qkv_bias_separate = qkv_bias_separate if qkv_fused: self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) self.q_proj = self.k_proj = self.v_proj = None if qkv_bias: self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) self.register_buffer('k_bias', torch.zeros(all_head_dim), persistent=False) self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) else: self.q_bias = self.k_bias = self.v_bias = None else: self.q_proj = nn.Linear(dim, all_head_dim, bias=qkv_bias) self.k_proj = nn.Linear(dim, all_head_dim, bias=False) self.v_proj = nn.Linear(dim, all_head_dim, bias=qkv_bias) self.qkv = None self.q_bias = self.k_bias = self.v_bias = None self.attn_drop = nn.Dropout(attn_drop) self.norm = norm_layer(all_head_dim) if norm_layer is not None else nn.Identity() self.proj = nn.Linear(all_head_dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward( self, x, rope: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None, ): B, N, C = x.shape if self.qkv is not None: if self.q_bias is None: qkv = self.qkv(x) else: qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.qkv_bias_separate: qkv 
= self.qkv(x) qkv += qkv_bias else: qkv = F.linear(x, weight=self.qkv.weight, bias=qkv_bias) qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) # B, num_heads, N, head_dim else: q = self.q_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) # B, num_heads, N, C k = self.k_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) v = self.v_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) if rope is not None: npt = self.num_prefix_tokens q = torch.cat([q[:, :, :npt, :], apply_rot_embed_cat(q[:, :, npt:, :], rope)], dim=2).type_as(v) k = torch.cat([k[:, :, :npt, :], apply_rot_embed_cat(k[:, :, npt:, :], rope)], dim=2).type_as(v) if self.fused_attn: x = F.scaled_dot_product_attention( q, k, v, attn_mask=attn_mask, dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale attn = (q @ k.transpose(-2, -1)) if attn_mask is not None: attn_mask = attn_mask.to(torch.bool) attn = attn.masked_fill(~attn_mask[:, None, None, :], float("-inf")) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.norm(x) x = self.proj(x) x = self.proj_drop(x) return x class EvaBlock(nn.Module): def __init__( self, dim: int, num_heads: int, qkv_bias: bool = True, qkv_fused: bool = True, mlp_ratio: float = 4., swiglu_mlp: bool = False, scale_mlp: bool = False, scale_attn_inner: bool = False, num_prefix_tokens: int = 1, proj_drop: float = 0., attn_drop: float = 0., drop_path: float = 0., init_values: Optional[float] = None, act_layer: Callable = nn.GELU, norm_layer: Callable = LayerNorm, attn_head_dim: Optional[int] = None, ): """ Args: dim: num_heads: qkv_bias: qkv_fused: mlp_ratio: swiglu_mlp: scale_mlp: scale_attn_inner: proj_drop: attn_drop: drop_path: init_values: act_layer: norm_layer: attn_head_dim: """ super().__init__() self.norm1 = norm_layer(dim) self.attn = EvaAttention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qkv_fused=qkv_fused, num_prefix_tokens=num_prefix_tokens, attn_drop=attn_drop, proj_drop=proj_drop, attn_head_dim=attn_head_dim, norm_layer=norm_layer if scale_attn_inner else None, ) self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) if init_values is not None else None self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) hidden_features = int(dim * mlp_ratio) if swiglu_mlp: if scale_mlp: # when norm in SwiGLU used, an impl with separate fc for gate & x is used self.mlp = SwiGLU( in_features=dim, hidden_features=hidden_features, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop, ) else: # w/o any extra norm, an impl with packed weights is used, matches existing GluMLP self.mlp = GluMlp( in_features=dim, hidden_features=hidden_features * 2, norm_layer=norm_layer if scale_mlp else None, act_layer=nn.SiLU, gate_last=False, drop=proj_drop, ) else: self.mlp = Mlp( in_features=dim, hidden_features=hidden_features, act_layer=act_layer, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop, ) self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) if init_values is not None else None self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x, rope: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None): if self.gamma_1 is None: x = x + self.drop_path1(self.attn(self.norm1(x), rope=rope, attn_mask=attn_mask)) x = x + self.drop_path2(self.mlp(self.norm2(x))) else: x = x + self.drop_path1(self.gamma_1 * self.attn(self.norm1(x), rope=rope, attn_mask=attn_mask)) x = x + self.drop_path2(self.gamma_2 * self.mlp(self.norm2(x))) return x class EvaBlockPostNorm(nn.Module): """ EVA block w/ post-norm and support for swiglu, MLP norm scale, ROPE. """ def __init__( self, dim: int, num_heads: int, qkv_bias: bool = True, qkv_fused: bool = True, mlp_ratio: float = 4., swiglu_mlp: bool = False, scale_mlp: bool = False, scale_attn_inner: bool = False, num_prefix_tokens: int = 1, proj_drop: float = 0., attn_drop: float = 0., drop_path: float = 0., init_values: Optional[float] = None, # ignore for post-norm act_layer: Callable = nn.GELU, norm_layer: Callable = nn.LayerNorm, attn_head_dim: Optional[int] = None, ): """ Args: dim: num_heads: qkv_bias: qkv_fused: mlp_ratio: swiglu_mlp: scale_mlp: scale_attn_inner: proj_drop: attn_drop: drop_path: init_values: act_layer: norm_layer: attn_head_dim: """ super().__init__() self.attn = EvaAttention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qkv_fused=qkv_fused, num_prefix_tokens=num_prefix_tokens, attn_drop=attn_drop, proj_drop=proj_drop, attn_head_dim=attn_head_dim, norm_layer=norm_layer if scale_attn_inner else None, ) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() hidden_features = int(dim * mlp_ratio) if swiglu_mlp: if scale_mlp: # when norm in SwiGLU used, an impl with separate fc for gate & x is used self.mlp = SwiGLU( in_features=dim, hidden_features=hidden_features, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop, ) else: # w/o any extra norm, an impl with packed fc1 weights is used, matches existing GluMLP self.mlp = GluMlp( in_features=dim, hidden_features=hidden_features * 2, norm_layer=norm_layer if scale_mlp else None, act_layer=nn.SiLU, gate_last=False, drop=proj_drop, ) else: self.mlp = Mlp( in_features=dim, hidden_features=hidden_features, act_layer=act_layer, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop, ) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x, rope: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None): x = x + self.drop_path1(self.norm1(self.attn(x, rope=rope, attn_mask=attn_mask))) x = x + self.drop_path2(self.norm2(self.mlp(x))) return x class Eva(nn.Module): """ Eva Vision Transformer w/ Abs & Rotary Pos Embed This class implements the EVA and EVA02 models that were based on the BEiT ViT variant * EVA - abs pos embed, global avg pool * EVA02 - abs + rope pos embed, global avg pool, SwiGLU, scale Norm in MLP (ala normformer) """ def __init__( self, img_size: Union[int, Tuple[int, int]] = 224, patch_size: Union[int, Tuple[int, int]] = 16, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', embed_dim: int = 768, depth: int = 12, num_heads: int = 12, qkv_bias: bool = True, qkv_fused: bool = True, mlp_ratio: float = 4., swiglu_mlp: bool = False, scale_mlp: bool = False, scale_attn_inner: bool = False, drop_rate: float = 0., pos_drop_rate: float = 0., patch_drop_rate: float = 0., proj_drop_rate: float = 0., attn_drop_rate: float = 0., drop_path_rate: float = 0., norm_layer: Callable = LayerNorm, init_values: Optional[float] = None, class_token: bool = True, num_reg_tokens: int = 0, use_abs_pos_emb: bool = True, use_rot_pos_emb: bool = False, use_post_norm: bool = False, dynamic_img_size: bool = False, dynamic_img_pad: bool = False, ref_feat_shape: Optional[Union[Tuple[int, int], int]] = None, head_init_scale: float = 0.001, ): """ Args: img_size: patch_size: in_chans: num_classes: global_pool: embed_dim: depth: num_heads: qkv_bias: qkv_fused: mlp_ratio: swiglu_mlp: scale_mlp: scale_attn_inner: drop_rate: pos_drop_rate: proj_drop_rate: attn_drop_rate: drop_path_rate: norm_layer: init_values: class_token: use_abs_pos_emb: use_rot_pos_emb: use_post_norm: ref_feat_shape: head_init_scale: """ super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models self.num_prefix_tokens = (1 if class_token else 0) + num_reg_tokens self.dynamic_img_size = dynamic_img_size self.grad_checkpointing = False embed_args = {} if dynamic_img_size: # flatten deferred until after pos embed embed_args.update(dict(strict_img_size=False, output_fmt='NHWC')) self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, dynamic_img_pad=dynamic_img_pad, **embed_args, ) num_patches = self.patch_embed.num_patches r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None self.reg_token = nn.Parameter(torch.zeros(1, num_reg_tokens, embed_dim)) if num_reg_tokens else None self.cls_embed = class_token and self.reg_token is None self.pos_embed = nn.Parameter( torch.zeros(1, num_patches + self.num_prefix_tokens, embed_dim)) if use_abs_pos_emb else None self.pos_drop = nn.Dropout(p=pos_drop_rate) if patch_drop_rate > 0: self.patch_drop = PatchDropout( patch_drop_rate, num_prefix_tokens=self.num_prefix_tokens, return_indices=True, ) else: self.patch_drop = None if use_rot_pos_emb: ref_feat_shape = to_2tuple(ref_feat_shape) if ref_feat_shape is not None else None self.rope = RotaryEmbeddingCat( embed_dim // num_heads, in_pixels=False, feat_shape=None if dynamic_img_size else self.patch_embed.grid_size, ref_feat_shape=ref_feat_shape, ) else: self.rope = None dpr = [x.item() for x in torch.linspace(0, 
drop_path_rate, depth)] # stochastic depth decay rule block_fn = EvaBlockPostNorm if use_post_norm else EvaBlock self.blocks = nn.ModuleList([ block_fn( dim=embed_dim, num_heads=num_heads, qkv_bias=qkv_bias, qkv_fused=qkv_fused, mlp_ratio=mlp_ratio, swiglu_mlp=swiglu_mlp, scale_mlp=scale_mlp, scale_attn_inner=scale_attn_inner, num_prefix_tokens=self.num_prefix_tokens, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, ) for i in range(depth)]) self.feature_info = [ dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)] use_fc_norm = self.global_pool == 'avg' self.norm = nn.Identity() if use_fc_norm else norm_layer(embed_dim) self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity() self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() self.apply(self._init_weights) if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) if self.cls_token is not None: trunc_normal_(self.cls_token, std=.02) if self.reg_token is not None: trunc_normal_(self.reg_token, std=.02) self.fix_init_weight() if isinstance(self.head, nn.Linear): trunc_normal_(self.head.weight, std=.02) self.head.weight.data.mul_(head_init_scale) self.head.bias.data.mul_(head_init_scale) def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if m.bias is not None: nn.init.zeros_(m.bias) @torch.jit.ignore def no_weight_decay(self): nwd = {'pos_embed', 'cls_token'} return nwd @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^cls_token|pos_embed|patch_embed', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))], ) return matcher @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def _pos_embed(self, x) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: if self.dynamic_img_size: B, H, W, C = x.shape if self.pos_embed is not None: prev_grid_size = self.patch_embed.grid_size pos_embed = resample_abs_pos_embed( self.pos_embed, new_size=(H, W), old_size=prev_grid_size, num_prefix_tokens=self.num_prefix_tokens, ) else: pos_embed = None x = x.view(B, -1, C) rot_pos_embed = self.rope.get_embed(shape=(H, W)) if self.rope is not None else None else: pos_embed = self.pos_embed rot_pos_embed = self.rope.get_embed() if self.rope is not None else None if self.cls_token is not None: x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) if pos_embed is not None: x = x + pos_embed if self.reg_token is not None: to_cat = [] if self.cls_token is not None: to_cat.append(self.cls_token.expand(x.shape[0], -1, -1)) to_cat.append(self.reg_token.expand(x.shape[0], -1, -1)) x = torch.cat(to_cat + [x], dim=1) x = self.pos_drop(x) # obtain shared rotary position embedding and apply patch dropout if self.patch_drop is not None: x, keep_indices = self.patch_drop(x) if 
rot_pos_embed is not None and keep_indices is not None: rot_pos_embed = apply_keep_indices_nlc(x, rot_pos_embed, keep_indices) return x, rot_pos_embed def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, return_prefix_tokens: bool = False, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if an int, if is a sequence, select by matching indices return_prefix_tokens: Return both prefix and spatial intermediate tokens norm: Apply norm layer to all intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features """ assert output_fmt in ('NCHW', 'NLC'), 'Output format for EVA-ViT features must be one of NCHW or NLC.' reshape = output_fmt == 'NCHW' intermediates = [] take_indices, max_index = feature_take_indices(len(self.blocks), indices) # forward pass B, _, height, width = x.shape x = self.patch_embed(x) x, rot_pos_embed = self._pos_embed(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript blocks = self.blocks else: blocks = self.blocks[:max_index + 1] for i, blk in enumerate(blocks): x = blk(x, rope=rot_pos_embed) if i in take_indices: intermediates.append(self.norm(x) if norm else x) # process intermediates if self.num_prefix_tokens: # split prefix (e.g. class, distill) and spatial feature tokens prefix_tokens = [y[:, 0:self.num_prefix_tokens] for y in intermediates] intermediates = [y[:, self.num_prefix_tokens:] for y in intermediates] if reshape: # reshape to BCHW output format H, W = self.patch_embed.dynamic_feat_size((height, width)) intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] if not torch.jit.is_scripting() and return_prefix_tokens: # return_prefix not support in torchscript due to poor type handling intermediates = list(zip(intermediates, prefix_tokens)) if intermediates_only: return intermediates x = self.norm(x) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. 
""" take_indices, max_index = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] # truncate blocks if prune_norm: self.norm = nn.Identity() if prune_head: self.fc_norm = nn.Identity() self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.patch_embed(x) x, rot_pos_embed = self._pos_embed(x) for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x, rope=rot_pos_embed) else: x = blk(x, rope=rot_pos_embed) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool: x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.fc_norm(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn( state_dict, model, interpolation='bicubic', antialias=True, ): """ convert patch embedding weight from manual patchify + linear proj to conv""" out_dict = {} state_dict = state_dict.get('model_ema', state_dict) state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('module', state_dict) state_dict = state_dict.get('state_dict', state_dict) # prefix for loading OpenCLIP compatible weights if 'visual.trunk.pos_embed' in state_dict: prefix = 'visual.trunk.' elif 'visual.pos_embed' in state_dict: prefix = 'visual.' else: prefix = '' mim_weights = prefix + 'mask_token' in state_dict no_qkv = prefix + 'blocks.0.attn.q_proj.weight' in state_dict len_prefix = len(prefix) for k, v in state_dict.items(): if prefix: if k.startswith(prefix): k = k[len_prefix:] else: continue if 'rope' in k: # fixed embedding no need to load buffer from checkpoint continue if 'patch_embed.proj.weight' in k: _, _, H, W = model.patch_embed.proj.weight.shape if v.shape[-1] != W or v.shape[-2] != H: v = resample_patch_embed( v, (H, W), interpolation=interpolation, antialias=antialias, verbose=True, ) elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]: # To resize pos embedding when using model at different size from pretrained weights num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1) v = resample_abs_pos_embed( v, new_size=model.patch_embed.grid_size, num_prefix_tokens=num_prefix_tokens, interpolation=interpolation, antialias=antialias, verbose=True, ) k = k.replace('mlp.ffn_ln', 'mlp.norm') k = k.replace('attn.inner_attn_ln', 'attn.norm') k = k.replace('mlp.w12', 'mlp.fc1') k = k.replace('mlp.w1', 'mlp.fc1_g') k = k.replace('mlp.w2', 'mlp.fc1_x') k = k.replace('mlp.w3', 'mlp.fc2') if no_qkv: k = k.replace('q_bias', 'q_proj.bias') k = k.replace('v_bias', 'v_proj.bias') if mim_weights and k in ('mask_token', 'lm_head.weight', 'lm_head.bias', 'norm.weight', 'norm.bias'): if k == 'norm.weight' or k == 'norm.bias': # try moving norm -> fc norm on fine-tune, probably a better starting point than new init k = k.replace('norm', 'fc_norm') else: # skip pretrain mask token & head weights continue out_dict[k] = v return out_dict def _create_eva(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', 3) model = build_model_with_cfg( Eva, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 
'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': OPENAI_CLIP_MEAN, 'std': OPENAI_CLIP_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', 'license': 'mit', **kwargs } default_cfgs = generate_default_cfgs({ # EVA 01 CLIP fine-tuned on imagenet-1k 'eva_giant_patch14_224.clip_ft_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_clip_vis_enc_sz224_ftcls_89p1.pt', hf_hub_id='timm/', ), 'eva_giant_patch14_336.clip_ft_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_clip_vis_enc_sz336_ftcls_89p4.pt', hf_hub_id='timm/', input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), # MIM EVA 01 pretrain, ft on in22k -> in1k 'eva_giant_patch14_336.m30m_ft_in22k_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_21k_1k_336px_psz14_ema_89p6.pt', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), 'eva_giant_patch14_560.m30m_ft_in22k_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_21k_1k_560px_psz14_ema_89p7.pt', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 560, 560), crop_pct=1.0, crop_mode='squash'), # in22k or m38m MIM pretrain w/ intermediate in22k fine-tune and final in1k fine-tune 'eva02_base_patch14_448.mim_in22k_ft_in22k_in1k': _cfg( # hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_B_pt_in21k_medft_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', ), 'eva02_large_patch14_448.mim_in22k_ft_in22k_in1k': _cfg( # hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_L_pt_in21k_medft_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', ), 'eva02_large_patch14_448.mim_m38m_ft_in22k_in1k': _cfg( hf_hub_id='timm/', #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_L_pt_m38m_medft_in21k_ft_in1k_p14.pt', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', ), # in22k or m3m MIM pretrain w/ in1k fine-tune 'eva02_tiny_patch14_336.mim_in22k_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_Ti_pt_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 336, 336), crop_pct=1.0, ), 'eva02_small_patch14_336.mim_in22k_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_S_pt_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 336, 336), crop_pct=1.0, ), 'eva02_base_patch14_448.mim_in22k_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_B_pt_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, ), 'eva02_large_patch14_448.mim_in22k_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_L_pt_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, ), 'eva02_large_patch14_448.mim_m38m_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_L_pt_m38m_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, ), # in22k or m3m MIM pretrain w/ in22k fine-tune 'eva02_base_patch14_448.mim_in22k_ft_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_B_pt_in21k_medft_in21k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841, ), 'eva02_large_patch14_448.mim_in22k_ft_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', 
hf_hub_filename='eva02/cls/in21k/eva02_L_pt_in21k_medft_in21k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841, ), 'eva02_large_patch14_448.mim_m38m_ft_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_L_pt_m38m_medft_in21k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841, ), # in22k or m38m MIM pretrain 'eva02_tiny_patch14_224.mim_in22k': _cfg( # hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_Ti_pt_in21k_p14.pt', hf_hub_id='timm/', num_classes=0, ), 'eva02_small_patch14_224.mim_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_S_pt_in21k_p14.pt', hf_hub_id='timm/', num_classes=0, ), 'eva02_base_patch14_224.mim_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_B_pt_in21k_p14.pt', hf_hub_id='timm/', num_classes=0, ), 'eva02_large_patch14_224.mim_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_L_pt_in21k_p14.pt', hf_hub_id='timm/', num_classes=0, ), 'eva02_large_patch14_224.mim_m38m': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_L_pt_m38m_p14.pt', hf_hub_id='timm/', num_classes=0, ), # EVA01 and EVA02 CLIP image towers 'eva_giant_patch14_clip_224.laion400m': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA01_CLIP_g_14_plus_psz14_s11B.pt', # hf_hub_id='timm/eva_giant_patch14_clip_224.laion400m_s11b_b41k', # float16 weights # hf_hub_filename='open_clip_pytorch_model.bin', hf_hub_id='timm/', num_classes=1024, ), 'eva_giant_patch14_clip_224.merged2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA01_CLIP_g_14_plus_psz14_s11B.pt', # hf_hub_id='timm/eva_giant_patch14_plus_clip_224.merged2b_s11b_b114k', # float16 weights # hf_hub_filename='open_clip_pytorch_model.bin', hf_hub_id='timm/', num_classes=1024, ), 'eva02_base_patch16_clip_224.merged2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt', # hf_hub_id='timm/eva02_base_patch16_clip_224.merged2b_s8b_b131k', # float16 weights # hf_hub_filename='open_clip_pytorch_model.bin', hf_hub_id='timm/', num_classes=512, ), 'eva02_large_patch14_clip_224.merged2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt', # hf_hub_id='timm/eva02_large_patch14_clip_224.merged2b_s4b_b131k', # float16 weights # hf_hub_filename='open_clip_pytorch_model.bin', hf_hub_id='timm/', num_classes=768, ), 'eva02_large_patch14_clip_336.merged2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt', # hf_hub_id='timm/eva02_large_patch14_clip_336.merged2b_s6b_b61k', # float16 weights # hf_hub_filename='open_clip_pytorch_model.bin', hf_hub_id='timm/', input_size=(3, 336, 336), crop_pct=1.0, num_classes=768, ), 'eva02_enormous_patch14_clip_224.laion2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_E_psz14_plus_s9B.pt', # hf_hub_id='timm/eva02_enormous_patch14_clip_224.laion2b_s4b_b115k', # float16 weights # hf_hub_filename='open_clip_pytorch_model.bin', hf_hub_id='timm/', num_classes=1024, ), 'eva02_enormous_patch14_clip_224.laion2b_plus': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_E_psz14_plus_s9B.pt', # hf_hub_id='timm/eva02_enormous_patch14_plus_clip_224.laion2b_s9b_b144k', # bfloat16 weights # hf_hub_filename='open_clip_pytorch_model.bin', hf_hub_id='timm/', num_classes=1024, ), 'eva02_enormous_patch14_clip_224.pretrain': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', 
hf_hub_filename='EVA02_E_psz14.pt', num_classes=0, ), 'vit_medium_patch16_rope_reg1_gap_256.sbb_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5) ), 'vit_mediumd_patch16_rope_reg1_gap_256.sbb_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5) ), 'vit_betwixt_patch16_rope_reg4_gap_256.sbb_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95, ), 'vit_base_patch16_rope_reg1_gap_256.sbb_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5) ), }) @register_model def eva_giant_patch14_224(pretrained=False, **kwargs) -> Eva: """ EVA-g model https://arxiv.org/abs/2211.07636 """ model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408) model = _create_eva('eva_giant_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_giant_patch14_336(pretrained=False, **kwargs) -> Eva: """ EVA-g model https://arxiv.org/abs/2211.07636 """ model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408) model = _create_eva('eva_giant_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_giant_patch14_560(pretrained=False, **kwargs) -> Eva: """ EVA-g model https://arxiv.org/abs/2211.07636 """ model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408) model = _create_eva('eva_giant_patch14_560', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_tiny_patch14_224(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=224, patch_size=14, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_tiny_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_small_patch14_224(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=224, patch_size=14, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_small_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_base_patch14_224(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=224, patch_size=14, embed_dim=768, depth=12, num_heads=12, qkv_fused=False, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, scale_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_base_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_large_patch14_224(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=224, patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4 * 2 / 3, qkv_fused=False, swiglu_mlp=True, scale_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_large_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_tiny_patch14_336(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=336, patch_size=14, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_tiny_patch14_336', 
pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_small_patch14_336(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=336, patch_size=14, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_small_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_base_patch14_448(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=448, patch_size=14, embed_dim=768, depth=12, num_heads=12, qkv_fused=False, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, scale_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_base_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_large_patch14_448(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=448, patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4 * 2 / 3, qkv_fused=False, swiglu_mlp=True, scale_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_large_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_giant_patch14_clip_224(pretrained=False, **kwargs) -> Eva: """ EVA-g CLIP model (only difference from non-CLIP is the pooling) """ model_args = dict( patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408, global_pool=kwargs.pop('global_pool', 'token')) model = _create_eva('eva_giant_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_base_patch16_clip_224(pretrained=False, **kwargs) -> Eva: """ A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_base """ model_args = dict( img_size=224, patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_fused=False, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, scale_mlp=True, scale_attn_inner=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 global_pool=kwargs.pop('global_pool', 'token'), ) model = _create_eva('eva02_base_patch16_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_large_patch14_clip_224(pretrained=False, **kwargs) -> Eva: """ A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_large """ model_args = dict( img_size=224, patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4 * 2 / 3, qkv_fused=False, swiglu_mlp=True, scale_mlp=True, scale_attn_inner=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 global_pool=kwargs.pop('global_pool', 'token'), ) model = _create_eva('eva02_large_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_large_patch14_clip_336(pretrained=False, **kwargs) -> Eva: """ A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_large """ model_args = dict( img_size=336, patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4 * 2 / 3, qkv_fused=False, swiglu_mlp=True, scale_mlp=True, scale_attn_inner=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 global_pool=kwargs.pop('global_pool', 'token'), ) model = _create_eva('eva02_large_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_enormous_patch14_clip_224(pretrained=False, **kwargs) -> Eva: """ A EVA-CLIP specific variant that uses 
residual post-norm in blocks """ model_args = dict( img_size=224, patch_size=14, embed_dim=1792, depth=64, num_heads=16, mlp_ratio=15360 / 1792, use_post_norm=True, global_pool=kwargs.pop('global_pool', 'token'), ) model = _create_eva('eva02_enormous_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_medium_patch16_rope_reg1_gap_256(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=256, patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_fused=True, qkv_bias=True, init_values=1e-5, class_token=False, num_reg_tokens=1, use_rot_pos_emb=True, use_abs_pos_emb=False, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('vit_medium_patch16_rope_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_mediumd_patch16_rope_reg1_gap_256(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=256, patch_size=16, embed_dim=512, depth=20, num_heads=8, qkv_fused=True, qkv_bias=False, init_values=1e-5, class_token=False, num_reg_tokens=1, use_rot_pos_emb=True, use_abs_pos_emb=False, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('vit_mediumd_patch16_rope_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_betwixt_patch16_rope_reg4_gap_256(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=256, patch_size=16, embed_dim=640, depth=12, num_heads=10, qkv_fused=True, qkv_bias=True, init_values=1e-5, class_token=False, num_reg_tokens=4, use_rot_pos_emb=True, use_abs_pos_emb=False, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('vit_betwixt_patch16_rope_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_rope_reg1_gap_256(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=256, patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_fused=True, qkv_bias=True, init_values=1e-5, class_token=False, num_reg_tokens=1, use_rot_pos_emb=True, use_abs_pos_emb=False, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('vit_base_patch16_rope_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model
pytorch-image-models/timm/models/eva.py/0
{ "file_path": "pytorch-image-models/timm/models/eva.py", "repo_id": "pytorch-image-models", "token_count": 25968 }
""" Pytorch Inception-Resnet-V2 implementation Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License) """ from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import create_classifier, ConvNormAct from ._builder import build_model_with_cfg from ._manipulate import flatten_modules from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['InceptionResnetV2'] class Mixed_5b(nn.Module): def __init__(self, conv_block=None): super(Mixed_5b, self).__init__() conv_block = conv_block or ConvNormAct self.branch0 = conv_block(192, 96, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(192, 48, kernel_size=1, stride=1), conv_block(48, 64, kernel_size=5, stride=1, padding=2) ) self.branch2 = nn.Sequential( conv_block(192, 64, kernel_size=1, stride=1), conv_block(64, 96, kernel_size=3, stride=1, padding=1), conv_block(96, 96, kernel_size=3, stride=1, padding=1) ) self.branch3 = nn.Sequential( nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), conv_block(192, 64, kernel_size=1, stride=1) ) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class Block35(nn.Module): def __init__(self, scale=1.0, conv_block=None): super(Block35, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(320, 32, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(320, 32, kernel_size=1, stride=1), conv_block(32, 32, kernel_size=3, stride=1, padding=1) ) self.branch2 = nn.Sequential( conv_block(320, 32, kernel_size=1, stride=1), conv_block(32, 48, kernel_size=3, stride=1, padding=1), conv_block(48, 64, kernel_size=3, stride=1, padding=1) ) self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1) self.act = nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) out = self.conv2d(out) out = out * self.scale + x out = self.act(out) return out class Mixed_6a(nn.Module): def __init__(self, conv_block=None): super(Mixed_6a, self).__init__() conv_block = conv_block or ConvNormAct self.branch0 = conv_block(320, 384, kernel_size=3, stride=2) self.branch1 = nn.Sequential( conv_block(320, 256, kernel_size=1, stride=1), conv_block(256, 256, kernel_size=3, stride=1, padding=1), conv_block(256, 384, kernel_size=3, stride=2) ) self.branch2 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) return out class Block17(nn.Module): def __init__(self, scale=1.0, conv_block=None): super(Block17, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(1088, 192, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(1088, 128, kernel_size=1, stride=1), conv_block(128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)), conv_block(160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)) ) self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1) self.act = nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) out = torch.cat((x0, x1), 1) out = self.conv2d(out) out = out * self.scale + x out = self.act(out) return out class 
Mixed_7a(nn.Module): def __init__(self, conv_block=None): super(Mixed_7a, self).__init__() conv_block = conv_block or ConvNormAct self.branch0 = nn.Sequential( conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 384, kernel_size=3, stride=2) ) self.branch1 = nn.Sequential( conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 288, kernel_size=3, stride=2) ) self.branch2 = nn.Sequential( conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 288, kernel_size=3, stride=1, padding=1), conv_block(288, 320, kernel_size=3, stride=2) ) self.branch3 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class Block8(nn.Module): def __init__(self, scale=1.0, no_relu=False, conv_block=None): super(Block8, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(2080, 192, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(2080, 192, kernel_size=1, stride=1), conv_block(192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)), conv_block(224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) ) self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1) self.relu = None if no_relu else nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) out = torch.cat((x0, x1), 1) out = self.conv2d(out) out = out * self.scale + x if self.relu is not None: out = self.relu(out) return out class InceptionResnetV2(nn.Module): def __init__( self, num_classes=1000, in_chans=3, drop_rate=0., output_stride=32, global_pool='avg', norm_layer='batchnorm2d', norm_eps=1e-3, act_layer='relu', ): super(InceptionResnetV2, self).__init__() self.num_classes = num_classes self.num_features = self.head_hidden_size = 1536 assert output_stride == 32 conv_block = partial( ConvNormAct, padding=0, norm_layer=norm_layer, act_layer=act_layer, norm_kwargs=dict(eps=norm_eps), act_kwargs=dict(inplace=True), ) self.conv2d_1a = conv_block(in_chans, 32, kernel_size=3, stride=2) self.conv2d_2a = conv_block(32, 32, kernel_size=3, stride=1) self.conv2d_2b = conv_block(32, 64, kernel_size=3, stride=1, padding=1) self.feature_info = [dict(num_chs=64, reduction=2, module='conv2d_2b')] self.maxpool_3a = nn.MaxPool2d(3, stride=2) self.conv2d_3b = conv_block(64, 80, kernel_size=1, stride=1) self.conv2d_4a = conv_block(80, 192, kernel_size=3, stride=1) self.feature_info += [dict(num_chs=192, reduction=4, module='conv2d_4a')] self.maxpool_5a = nn.MaxPool2d(3, stride=2) self.mixed_5b = Mixed_5b(conv_block=conv_block) self.repeat = nn.Sequential(*[Block35(scale=0.17, conv_block=conv_block) for _ in range(10)]) self.feature_info += [dict(num_chs=320, reduction=8, module='repeat')] self.mixed_6a = Mixed_6a(conv_block=conv_block) self.repeat_1 = nn.Sequential(*[Block17(scale=0.10, conv_block=conv_block) for _ in range(20)]) self.feature_info += [dict(num_chs=1088, reduction=16, module='repeat_1')] self.mixed_7a = Mixed_7a(conv_block=conv_block) self.repeat_2 = nn.Sequential(*[Block8(scale=0.20, conv_block=conv_block) for _ in range(9)]) self.block8 = Block8(no_relu=True, conv_block=conv_block) self.conv2d_7b = conv_block(2080, self.num_features, kernel_size=1, stride=1) self.feature_info += [dict(num_chs=self.num_features, reduction=32, module='conv2d_7b')] self.global_pool, self.head_drop, self.classif = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) @torch.jit.ignore def 
group_matcher(self, coarse=False): module_map = {k: i for i, (k, _) in enumerate(flatten_modules(self.named_children(), prefix=()))} module_map.pop(('classif',)) def _matcher(name): if any([name.startswith(n) for n in ('conv2d_1', 'conv2d_2')]): return 0 elif any([name.startswith(n) for n in ('conv2d_3', 'conv2d_4')]): return 1 elif any([name.startswith(n) for n in ('block8', 'conv2d_7')]): return len(module_map) + 1 else: for k in module_map.keys(): if k == tuple(name.split('.')[:len(k)]): return module_map[k] return float('inf') return _matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, "checkpointing not supported" @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.classif def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): self.num_classes = num_classes self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.conv2d_1a(x) x = self.conv2d_2a(x) x = self.conv2d_2b(x) x = self.maxpool_3a(x) x = self.conv2d_3b(x) x = self.conv2d_4a(x) x = self.maxpool_5a(x) x = self.mixed_5b(x) x = self.repeat(x) x = self.mixed_6a(x) x = self.repeat_1(x) x = self.mixed_7a(x) x = self.repeat_2(x) x = self.block8(x) x = self.conv2d_7b(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.classif(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_inception_resnet_v2(variant, pretrained=False, **kwargs): return build_model_with_cfg(InceptionResnetV2, variant, pretrained, **kwargs) default_cfgs = generate_default_cfgs({ # ported from http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz 'inception_resnet_v2.tf_in1k': { 'hf_hub_id': 'timm/', 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.8975, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', }, # As per https://arxiv.org/abs/1705.07204 and # ported from http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz 'inception_resnet_v2.tf_ens_adv_in1k': { 'hf_hub_id': 'timm/', 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.8975, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', } }) @register_model def inception_resnet_v2(pretrained=False, **kwargs) -> InceptionResnetV2: return _create_inception_resnet_v2('inception_resnet_v2', pretrained=pretrained, **kwargs) register_model_deprecations(__name__, { 'ens_adv_inception_resnet_v2': 'inception_resnet_v2.tf_ens_adv_in1k', })
pytorch-image-models/timm/models/inception_resnet_v2.py/0
{ "file_path": "pytorch-image-models/timm/models/inception_resnet_v2.py", "repo_id": "pytorch-image-models", "token_count": 6034 }
""" Pooling-based Vision Transformer (PiT) in PyTorch A PyTorch implement of Pooling-based Vision Transformers as described in 'Rethinking Spatial Dimensions of Vision Transformers' - https://arxiv.org/abs/2103.16302 This code was adapted from the original version at https://github.com/naver-ai/pit, original copyright below. Modifications for timm by / Copyright 2020 Ross Wightman """ # PiT # Copyright 2021-present NAVER Corp. # Apache License v2.0 import math import re from functools import partial from typing import Optional, Sequence, Tuple import torch from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import trunc_normal_, to_2tuple from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from .vision_transformer import Block __all__ = ['PoolingVisionTransformer'] # model_registry will add each entrypoint fn to this class SequentialTuple(nn.Sequential): """ This module exists to work around torchscript typing issues list -> list""" def __init__(self, *args): super(SequentialTuple, self).__init__(*args) def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: for module in self: x = module(x) return x class Transformer(nn.Module): def __init__( self, base_dim, depth, heads, mlp_ratio, pool=None, proj_drop=.0, attn_drop=.0, drop_path_prob=None, norm_layer=None, ): super(Transformer, self).__init__() embed_dim = base_dim * heads self.pool = pool self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() self.blocks = nn.Sequential(*[ Block( dim=embed_dim, num_heads=heads, mlp_ratio=mlp_ratio, qkv_bias=True, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path_prob[i], norm_layer=partial(nn.LayerNorm, eps=1e-6) ) for i in range(depth)]) def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: x, cls_tokens = x token_length = cls_tokens.shape[1] if self.pool is not None: x, cls_tokens = self.pool(x, cls_tokens) B, C, H, W = x.shape x = x.flatten(2).transpose(1, 2) x = torch.cat((cls_tokens, x), dim=1) x = self.norm(x) x = self.blocks(x) cls_tokens = x[:, :token_length] x = x[:, token_length:] x = x.transpose(1, 2).reshape(B, C, H, W) return x, cls_tokens class Pooling(nn.Module): def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'): super(Pooling, self).__init__() self.conv = nn.Conv2d( in_feature, out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride, padding_mode=padding_mode, groups=in_feature, ) self.fc = nn.Linear(in_feature, out_feature) def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]: x = self.conv(x) cls_token = self.fc(cls_token) return x, cls_token class ConvEmbedding(nn.Module): def __init__( self, in_channels, out_channels, img_size: int = 224, patch_size: int = 16, stride: int = 8, padding: int = 0, ): super(ConvEmbedding, self).__init__() padding = padding self.img_size = to_2tuple(img_size) self.patch_size = to_2tuple(patch_size) self.height = math.floor((self.img_size[0] + 2 * padding - self.patch_size[0]) / stride + 1) self.width = math.floor((self.img_size[1] + 2 * padding - self.patch_size[1]) / stride + 1) self.grid_size = (self.height, self.width) self.conv = nn.Conv2d( in_channels, out_channels, kernel_size=patch_size, stride=stride, padding=padding, bias=True) def forward(self, x): x = self.conv(x) return x class PoolingVisionTransformer(nn.Module): """ Pooling-based Vision Transformer A PyTorch implement of 'Rethinking 
Spatial Dimensions of Vision Transformers' - https://arxiv.org/abs/2103.16302 """ def __init__( self, img_size: int = 224, patch_size: int = 16, stride: int = 8, stem_type: str = 'overlap', base_dims: Sequence[int] = (48, 48, 48), depth: Sequence[int] = (2, 6, 4), heads: Sequence[int] = (2, 4, 8), mlp_ratio: float = 4, num_classes=1000, in_chans=3, global_pool='token', distilled=False, drop_rate=0., pos_drop_drate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., ): super(PoolingVisionTransformer, self).__init__() assert global_pool in ('token',) self.base_dims = base_dims self.heads = heads embed_dim = base_dims[0] * heads[0] self.num_classes = num_classes self.global_pool = global_pool self.num_tokens = 2 if distilled else 1 self.feature_info = [] self.patch_embed = ConvEmbedding(in_chans, embed_dim, img_size, patch_size, stride) self.pos_embed = nn.Parameter(torch.randn(1, embed_dim, self.patch_embed.height, self.patch_embed.width)) self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, embed_dim)) self.pos_drop = nn.Dropout(p=pos_drop_drate) transformers = [] # stochastic depth decay rule dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)] prev_dim = embed_dim for i in range(len(depth)): pool = None embed_dim = base_dims[i] * heads[i] if i > 0: pool = Pooling( prev_dim, embed_dim, stride=2, ) transformers += [Transformer( base_dims[i], depth[i], heads[i], mlp_ratio, pool=pool, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path_prob=dpr[i], )] prev_dim = embed_dim self.feature_info += [dict(num_chs=prev_dim, reduction=(stride - 1) * 2**i, module=f'transformers.{i}')] self.transformers = SequentialTuple(*transformers) self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6) self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # Classifier head self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() self.head_dist = None if distilled: self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() self.distilled_training = False # must set this True to train w/ distillation token trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} @torch.jit.ignore def set_distilled_training(self, enable=True): self.distilled_training = enable @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' def get_classifier(self) -> nn.Module: if self.head_dist is not None: return self.head, self.head_dist else: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() if self.head_dist is not None: self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.patch_embed(x) x = self.pos_drop(x + self.pos_embed) cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) x, cls_tokens = self.transformers((x, cls_tokens)) cls_tokens = self.norm(cls_tokens) return cls_tokens def forward_head(self, x, pre_logits: bool = 
False) -> torch.Tensor: if self.head_dist is not None: assert self.global_pool == 'token' x, x_dist = x[:, 0], x[:, 1] x = self.head_drop(x) x_dist = self.head_drop(x) if not pre_logits: x = self.head(x) x_dist = self.head_dist(x_dist) if self.distilled_training and self.training and not torch.jit.is_scripting(): # only return separate classification predictions when training in distilled mode return x, x_dist else: # during standard train / finetune, inference average the classifier predictions return (x + x_dist) / 2 else: if self.global_pool == 'token': x = x[:, 0] x = self.head_drop(x) if not pre_logits: x = self.head(x) return x def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): """ preprocess checkpoints """ out_dict = {} p_blocks = re.compile(r'pools\.(\d)\.') for k, v in state_dict.items(): # FIXME need to update resize for PiT impl # if k == 'pos_embed' and v.shape != model.pos_embed.shape: # # To resize pos embedding when using model at different size from pretrained weights # v = resize_pos_embed(v, model.pos_embed) k = p_blocks.sub(lambda exp: f'transformers.{int(exp.group(1)) + 1}.pool.', k) out_dict[k] = v return out_dict def _create_pit(variant, pretrained=False, **kwargs): default_out_indices = tuple(range(3)) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg( PoolingVisionTransformer, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(feature_cls='hook', no_rewrite=True, out_indices=out_indices), **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.conv', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ # deit models (FB weights) 'pit_ti_224.in1k': _cfg(hf_hub_id='timm/'), 'pit_xs_224.in1k': _cfg(hf_hub_id='timm/'), 'pit_s_224.in1k': _cfg(hf_hub_id='timm/'), 'pit_b_224.in1k': _cfg(hf_hub_id='timm/'), 'pit_ti_distilled_224.in1k': _cfg( hf_hub_id='timm/', classifier=('head', 'head_dist')), 'pit_xs_distilled_224.in1k': _cfg( hf_hub_id='timm/', classifier=('head', 'head_dist')), 'pit_s_distilled_224.in1k': _cfg( hf_hub_id='timm/', classifier=('head', 'head_dist')), 'pit_b_distilled_224.in1k': _cfg( hf_hub_id='timm/', classifier=('head', 'head_dist')), }) @register_model def pit_b_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict( patch_size=14, stride=7, base_dims=[64, 64, 64], depth=[3, 6, 4], heads=[4, 8, 16], mlp_ratio=4, ) return _create_pit('pit_b_224', pretrained, **dict(model_args, **kwargs)) @register_model def pit_s_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict( patch_size=16, stride=8, base_dims=[48, 48, 48], depth=[2, 6, 4], heads=[3, 6, 12], mlp_ratio=4, ) return _create_pit('pit_s_224', pretrained, **dict(model_args, **kwargs)) @register_model def pit_xs_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict( patch_size=16, stride=8, base_dims=[48, 48, 48], depth=[2, 6, 4], heads=[2, 4, 8], mlp_ratio=4, ) return _create_pit('pit_xs_224', pretrained, **dict(model_args, **kwargs)) @register_model def pit_ti_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict( patch_size=16, stride=8, base_dims=[32, 32, 32], depth=[2, 6, 4], heads=[2, 4, 8], 
mlp_ratio=4, ) return _create_pit('pit_ti_224', pretrained, **dict(model_args, **kwargs)) @register_model def pit_b_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict( patch_size=14, stride=7, base_dims=[64, 64, 64], depth=[3, 6, 4], heads=[4, 8, 16], mlp_ratio=4, distilled=True, ) return _create_pit('pit_b_distilled_224', pretrained, **dict(model_args, **kwargs)) @register_model def pit_s_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict( patch_size=16, stride=8, base_dims=[48, 48, 48], depth=[2, 6, 4], heads=[3, 6, 12], mlp_ratio=4, distilled=True, ) return _create_pit('pit_s_distilled_224', pretrained, **dict(model_args, **kwargs)) @register_model def pit_xs_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict( patch_size=16, stride=8, base_dims=[48, 48, 48], depth=[2, 6, 4], heads=[2, 4, 8], mlp_ratio=4, distilled=True, ) return _create_pit('pit_xs_distilled_224', pretrained, **dict(model_args, **kwargs)) @register_model def pit_ti_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict( patch_size=16, stride=8, base_dims=[32, 32, 32], depth=[2, 6, 4], heads=[2, 4, 8], mlp_ratio=4, distilled=True, ) return _create_pit('pit_ti_distilled_224', pretrained, **dict(model_args, **kwargs))
pytorch-image-models/timm/models/pit.py/0
{ "file_path": "pytorch-image-models/timm/models/pit.py", "repo_id": "pytorch-image-models", "token_count": 7404 }
""" Selective Kernel Networks (ResNet base) Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) This was inspired by reading 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268) and a streamlined impl at https://github.com/clovaai/assembled-cnn but I ended up building something closer to the original paper with some modifications of my own to better balance param count vs accuracy. Hacked together by / Copyright 2020 Ross Wightman """ import math from torch import nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SelectiveKernel, ConvNormAct, create_attn from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from .resnet import ResNet class SelectiveKernelBasic(nn.Module): expansion = 1 def __init__( self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None, ): super(SelectiveKernelBasic, self).__init__() sk_kwargs = sk_kwargs or {} conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer) assert cardinality == 1, 'BasicBlock only supports cardinality of 1' assert base_width == 64, 'BasicBlock doest not support changing base width' first_planes = planes // reduce_first outplanes = planes * self.expansion first_dilation = first_dilation or dilation self.conv1 = SelectiveKernel( inplanes, first_planes, stride=stride, dilation=first_dilation, aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs) self.conv2 = ConvNormAct( first_planes, outplanes, kernel_size=3, dilation=dilation, apply_act=False, **conv_kwargs) self.se = create_attn(attn_layer, outplanes) self.act = act_layer(inplace=True) self.downsample = downsample self.drop_path = drop_path def zero_init_last(self): if getattr(self.conv2.bn, 'weight', None) is not None: nn.init.zeros_(self.conv2.bn.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.conv2(x) if self.se is not None: x = self.se(x) if self.drop_path is not None: x = self.drop_path(x) if self.downsample is not None: shortcut = self.downsample(shortcut) x += shortcut x = self.act(x) return x class SelectiveKernelBottleneck(nn.Module): expansion = 4 def __init__( self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None, ): super(SelectiveKernelBottleneck, self).__init__() sk_kwargs = sk_kwargs or {} conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer) width = int(math.floor(planes * (base_width / 64)) * cardinality) first_planes = width // reduce_first outplanes = planes * self.expansion first_dilation = first_dilation or dilation self.conv1 = ConvNormAct(inplanes, first_planes, kernel_size=1, **conv_kwargs) self.conv2 = SelectiveKernel( first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality, aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs) self.conv3 = ConvNormAct(width, outplanes, kernel_size=1, apply_act=False, **conv_kwargs) self.se = create_attn(attn_layer, outplanes) self.act = act_layer(inplace=True) self.downsample = downsample self.drop_path = drop_path def zero_init_last(self): if getattr(self.conv3.bn, 'weight', None) is not None: 
nn.init.zeros_(self.conv3.bn.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) if self.se is not None: x = self.se(x) if self.drop_path is not None: x = self.drop_path(x) if self.downsample is not None: shortcut = self.downsample(shortcut) x += shortcut x = self.act(x) return x def _create_skresnet(variant, pretrained=False, **kwargs): return build_model_with_cfg( ResNet, variant, pretrained, **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv1', 'classifier': 'fc', **kwargs } default_cfgs = generate_default_cfgs({ 'skresnet18.ra_in1k': _cfg(hf_hub_id='timm/'), 'skresnet34.ra_in1k': _cfg(hf_hub_id='timm/'), 'skresnet50.untrained': _cfg(), 'skresnet50d.untrained': _cfg( first_conv='conv1.0'), 'skresnext50_32x4d.ra_in1k': _cfg(hf_hub_id='timm/'), }) @register_model def skresnet18(pretrained=False, **kwargs) -> ResNet: """Constructs a Selective Kernel ResNet-18 model. Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this variation splits the input channels to the selective convolutions to keep param count down. """ sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) model_args = dict( block=SelectiveKernelBasic, layers=[2, 2, 2, 2], block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return _create_skresnet('skresnet18', pretrained, **model_args) @register_model def skresnet34(pretrained=False, **kwargs) -> ResNet: """Constructs a Selective Kernel ResNet-34 model. Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this variation splits the input channels to the selective convolutions to keep param count down. """ sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) model_args = dict( block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return _create_skresnet('skresnet34', pretrained, **model_args) @register_model def skresnet50(pretrained=False, **kwargs) -> ResNet: """Constructs a Select Kernel ResNet-50 model. Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this variation splits the input channels to the selective convolutions to keep param count down. """ sk_kwargs = dict(split_input=True) model_args = dict( block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return _create_skresnet('skresnet50', pretrained, **model_args) @register_model def skresnet50d(pretrained=False, **kwargs) -> ResNet: """Constructs a Select Kernel ResNet-50-D model. Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this variation splits the input channels to the selective convolutions to keep param count down. """ sk_kwargs = dict(split_input=True) model_args = dict( block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return _create_skresnet('skresnet50d', pretrained, **model_args) @register_model def skresnext50_32x4d(pretrained=False, **kwargs) -> ResNet: """Constructs a Select Kernel ResNeXt50-32x4d model. 
This should be equivalent to the SKNet-50 model in the Select Kernel Paper
    """
    sk_kwargs = dict(rd_ratio=1/16, rd_divisor=32, split_input=False)
    model_args = dict(
        block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4,
        block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs)
    return _create_skresnet('skresnext50_32x4d', pretrained, **model_args)
pytorch-image-models/timm/models/sknet.py/0
{ "file_path": "pytorch-image-models/timm/models/sknet.py", "repo_id": "pytorch-image-models", "token_count": 3801 }
""" VoVNet (V1 & V2) Papers: * `An Energy and GPU-Computation Efficient Backbone Network` - https://arxiv.org/abs/1904.09730 * `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 Looked at https://github.com/youngwanLEE/vovnet-detectron2 & https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py for some reference, rewrote most of the code. Hacked together by / Copyright 2020 Ross Wightman """ from typing import List, Optional import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ConvNormAct, SeparableConvNormAct, BatchNormAct2d, ClassifierHead, DropPath, \ create_attn, create_norm_act_layer from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['VovNet'] # model_registry will add each entrypoint fn to this class SequentialAppendList(nn.Sequential): def __init__(self, *args): super(SequentialAppendList, self).__init__(*args) def forward(self, x: torch.Tensor, concat_list: List[torch.Tensor]) -> torch.Tensor: for i, module in enumerate(self): if i == 0: concat_list.append(module(x)) else: concat_list.append(module(concat_list[-1])) x = torch.cat(concat_list, dim=1) return x class OsaBlock(nn.Module): def __init__( self, in_chs, mid_chs, out_chs, layer_per_block, residual=False, depthwise=False, attn='', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path=None, ): super(OsaBlock, self).__init__() self.residual = residual self.depthwise = depthwise conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) next_in_chs = in_chs if self.depthwise and next_in_chs != mid_chs: assert not residual self.conv_reduction = ConvNormAct(next_in_chs, mid_chs, 1, **conv_kwargs) else: self.conv_reduction = None mid_convs = [] for i in range(layer_per_block): if self.depthwise: conv = SeparableConvNormAct(mid_chs, mid_chs, **conv_kwargs) else: conv = ConvNormAct(next_in_chs, mid_chs, 3, **conv_kwargs) next_in_chs = mid_chs mid_convs.append(conv) self.conv_mid = SequentialAppendList(*mid_convs) # feature aggregation next_in_chs = in_chs + layer_per_block * mid_chs self.conv_concat = ConvNormAct(next_in_chs, out_chs, **conv_kwargs) self.attn = create_attn(attn, out_chs) if attn else None self.drop_path = drop_path def forward(self, x): output = [x] if self.conv_reduction is not None: x = self.conv_reduction(x) x = self.conv_mid(x, output) x = self.conv_concat(x) if self.attn is not None: x = self.attn(x) if self.drop_path is not None: x = self.drop_path(x) if self.residual: x = x + output[0] return x class OsaStage(nn.Module): def __init__( self, in_chs, mid_chs, out_chs, block_per_stage, layer_per_block, downsample=True, residual=True, depthwise=False, attn='ese', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path_rates=None, ): super(OsaStage, self).__init__() self.grad_checkpointing = False if downsample: self.pool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) else: self.pool = None blocks = [] for i in range(block_per_stage): last_block = i == block_per_stage - 1 if drop_path_rates is not None and drop_path_rates[i] > 0.: drop_path = DropPath(drop_path_rates[i]) else: drop_path = None blocks += [OsaBlock( in_chs, mid_chs, out_chs, layer_per_block, residual=residual and i > 0, depthwise=depthwise, attn=attn if last_block else '', norm_layer=norm_layer, act_layer=act_layer, drop_path=drop_path )] in_chs = out_chs self.blocks = nn.Sequential(*blocks) def 
forward(self, x): if self.pool is not None: x = self.pool(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class VovNet(nn.Module): def __init__( self, cfg, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32, norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_rate=0., drop_path_rate=0., **kwargs, ): """ Args: cfg (dict): Model architecture configuration in_chans (int): Number of input channels (default: 3) num_classes (int): Number of classifier classes (default: 1000) global_pool (str): Global pooling type (default: 'avg') output_stride (int): Output stride of network, one of (8, 16, 32) (default: 32) norm_layer (Union[str, nn.Module]): normalization layer act_layer (Union[str, nn.Module]): activation layer drop_rate (float): Dropout rate (default: 0.) drop_path_rate (float): Stochastic depth drop-path rate (default: 0.) kwargs (dict): Extra kwargs overlayed onto cfg """ super(VovNet, self).__init__() self.num_classes = num_classes self.drop_rate = drop_rate assert output_stride == 32 # FIXME support dilation cfg = dict(cfg, **kwargs) stem_stride = cfg.get("stem_stride", 4) stem_chs = cfg["stem_chs"] stage_conv_chs = cfg["stage_conv_chs"] stage_out_chs = cfg["stage_out_chs"] block_per_stage = cfg["block_per_stage"] layer_per_block = cfg["layer_per_block"] conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) # Stem module last_stem_stride = stem_stride // 2 conv_type = SeparableConvNormAct if cfg["depthwise"] else ConvNormAct self.stem = nn.Sequential(*[ ConvNormAct(in_chans, stem_chs[0], 3, stride=2, **conv_kwargs), conv_type(stem_chs[0], stem_chs[1], 3, stride=1, **conv_kwargs), conv_type(stem_chs[1], stem_chs[2], 3, stride=last_stem_stride, **conv_kwargs), ]) self.feature_info = [dict( num_chs=stem_chs[1], reduction=2, module=f'stem.{1 if stem_stride == 4 else 2}')] current_stride = stem_stride # OSA stages stage_dpr = torch.split(torch.linspace(0, drop_path_rate, sum(block_per_stage)), block_per_stage) in_ch_list = stem_chs[-1:] + stage_out_chs[:-1] stage_args = dict(residual=cfg["residual"], depthwise=cfg["depthwise"], attn=cfg["attn"], **conv_kwargs) stages = [] for i in range(4): # num_stages downsample = stem_stride == 2 or i > 0 # first stage has no stride/downsample if stem_stride is 4 stages += [OsaStage( in_ch_list[i], stage_conv_chs[i], stage_out_chs[i], block_per_stage[i], layer_per_block, downsample=downsample, drop_path_rates=stage_dpr[i], **stage_args, )] self.num_features = stage_out_chs[i] current_stride *= 2 if downsample else 1 self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.head_hidden_size = self.num_features self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) for n, m in self.named_modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.Linear): nn.init.zeros_(m.bias) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else r'^stages\.(\d+).blocks\.(\d+)', ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes, global_pool: Optional[str] = None): self.num_classes = num_classes 
self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) return self.stages(x) def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x # model cfgs adapted from https://github.com/youngwanLEE/vovnet-detectron2 & # https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py model_cfgs = dict( vovnet39a=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 2, 2], residual=False, depthwise=False, attn='', ), vovnet57a=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 4, 3], residual=False, depthwise=False, attn='', ), ese_vovnet19b_slim_dw=dict( stem_chs=[64, 64, 64], stage_conv_chs=[64, 80, 96, 112], stage_out_chs=[112, 256, 384, 512], layer_per_block=3, block_per_stage=[1, 1, 1, 1], residual=True, depthwise=True, attn='ese', ), ese_vovnet19b_dw=dict( stem_chs=[64, 64, 64], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=3, block_per_stage=[1, 1, 1, 1], residual=True, depthwise=True, attn='ese', ), ese_vovnet19b_slim=dict( stem_chs=[64, 64, 128], stage_conv_chs=[64, 80, 96, 112], stage_out_chs=[112, 256, 384, 512], layer_per_block=3, block_per_stage=[1, 1, 1, 1], residual=True, depthwise=False, attn='ese', ), ese_vovnet19b=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=3, block_per_stage=[1, 1, 1, 1], residual=True, depthwise=False, attn='ese', ), ese_vovnet39b=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 2, 2], residual=True, depthwise=False, attn='ese', ), ese_vovnet57b=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 4, 3], residual=True, depthwise=False, attn='ese', ), ese_vovnet99b=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 3, 9, 3], residual=True, depthwise=False, attn='ese', ), eca_vovnet39b=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 2, 2], residual=True, depthwise=False, attn='eca', ), ) model_cfgs['ese_vovnet39b_evos'] = model_cfgs['ese_vovnet39b'] def _create_vovnet(variant, pretrained=False, **kwargs): return build_model_with_cfg( VovNet, variant, pretrained, model_cfg=model_cfgs[variant], feature_cfg=dict(flatten_sequential=True), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', **kwargs, } default_cfgs = generate_default_cfgs({ 'vovnet39a.untrained': _cfg(url=''), 'vovnet57a.untrained': _cfg(url=''), 'ese_vovnet19b_slim_dw.untrained': _cfg(url=''), 'ese_vovnet19b_dw.ra_in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'ese_vovnet19b_slim.untrained': _cfg(url=''), 'ese_vovnet39b.ra_in1k': _cfg( 
hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'ese_vovnet57b.ra4_e3600_r256_in1k': _cfg( hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 320, 320), test_crop_pct=1.0 ), 'ese_vovnet99b.untrained': _cfg(url=''), 'eca_vovnet39b.untrained': _cfg(url=''), 'ese_vovnet39b_evos.untrained': _cfg(url=''), }) @register_model def vovnet39a(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('vovnet39a', pretrained=pretrained, **kwargs) @register_model def vovnet57a(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('vovnet57a', pretrained=pretrained, **kwargs) @register_model def ese_vovnet19b_slim_dw(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet19b_slim_dw', pretrained=pretrained, **kwargs) @register_model def ese_vovnet19b_dw(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet19b_dw', pretrained=pretrained, **kwargs) @register_model def ese_vovnet19b_slim(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet19b_slim', pretrained=pretrained, **kwargs) @register_model def ese_vovnet39b(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet39b', pretrained=pretrained, **kwargs) @register_model def ese_vovnet57b(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet57b', pretrained=pretrained, **kwargs) @register_model def ese_vovnet99b(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet99b', pretrained=pretrained, **kwargs) @register_model def eca_vovnet39b(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('eca_vovnet39b', pretrained=pretrained, **kwargs) # Experimental Models @register_model def ese_vovnet39b_evos(pretrained=False, **kwargs) -> VovNet: def norm_act_fn(num_features, **nkwargs): return create_norm_act_layer('evonorms0', num_features, jit=False, **nkwargs) return _create_vovnet('ese_vovnet39b_evos', pretrained=pretrained, norm_layer=norm_act_fn, **kwargs)
pytorch-image-models/timm/models/vovnet.py/0
{ "file_path": "pytorch-image-models/timm/models/vovnet.py", "repo_id": "pytorch-image-models", "token_count": 8067 }
""" PyTorch Implementation of the Kron (PSGD) optimizer This is a PSGD optimizer using a Kronecker-factored preconditioner. This impl was adapted from https://github.com/evanatyourservice/kron_torch by Evan Walters, licensed CC-BY-4.0. Contributions to above also made by * Lucas Nestler, added to his https://github.com/ClashLuke/HeavyBall implementation. * Omead Pooladzandi https://github.com/opooladz The above work drew from https://github.com/lixilinx/psgd_torch by Xi-Lin Li This `timm` impl * works with a wider variety of torch versions * fixes some checkpoint save/restore (resume issues) * adds decoupled weight-decay option * has some refactoring, cleanup of args, default/group items * warning about not having opt_einsum (unusable without) """ import logging import string import random import warnings from typing import Any, Callable, Dict, Optional, Tuple, Union import numpy as np import torch try: # NOTE opt_einsum needed to avoid blowing up memory with einsum ops import opt_einsum import torch.backends.opt_einsum torch.backends.opt_einsum.enabled = True torch.backends.opt_einsum.strategy = "auto-hq" has_opt_einsum = True except ImportError: has_opt_einsum = False try: torch._dynamo.config.cache_size_limit = 1_000_000 has_dynamo = True except AttributeError: has_dynamo = False from ._types import ParamsT _logger = logging.getLogger(__name__) def precond_update_prob_schedule( n: float, max_prob: float = 1.0, min_prob: float = 0.03, decay: float = 0.001, flat_start: float = 500, ) -> torch.Tensor: """Anneal preconditioner update probability during beginning of training. PSGD benefits from more preconditioner updates at the beginning of training, but once the preconditioner is learned the update probability can drop low. This schedule is an exponential anneal with a flat start. Default settings keep update probability at 1.0 for 200 steps then exponentially anneal down to `min_prob` by 4000 steps. Default settings work very well for most models and training regimes. """ """Exponential anneal with flat start.""" n = torch.tensor(n, dtype=torch.float32) prob = max_prob * torch.exp(-decay * (n - flat_start)) prob.clamp_(min=min_prob, max=max_prob) return prob class Kron(torch.optim.Optimizer): """Implements PSGD Kron from https://github.com/lixilinx/psgd_torch. Args: params: Iterable of parameters to optimize or dicts defining parameter groups. lr: Learning rate. momentum: Momentum parameter. weight_decay: Weight decay. preconditioner_update_probability: Probability of updating the preconditioner. If None, defaults to a schedule that anneals from 1.0 to 0.03 by 4000 steps. max_size_triangular: Max size for dim's preconditioner to be triangular. min_ndim_triangular: Minimum number of dimensions a layer needs to have triangular preconditioners. memory_save_mode: 'one_diag', 'smart_one_diag', or 'all_diag', None is default to set all preconditioners to be triangular, 'one_diag' sets the largest or last dim to be diagonal per layer, and 'all_diag' sets all preconditioners to be diagonal. momentum_into_precond_update: whether to send momentum into preconditioner update instead of raw gradients. mu_dtype: Dtype of the momentum accumulator. precond_dtype: Dtype of the preconditioner. decoupled_decay: AdamW style decoupled weight decay flatten: Flatten dimensions instead of fully relying on expressions for higher rank params flatten_start_dim: Start of flatten range, defaults to 2. Seems good tradeoff for ConvNets. flatten_end_dim: End of flatten range, defaults to -1. 
stochastic_weight_decay: Enable random modulation of weight decay deterministic: Deterministic behaviour across save / load (resume). FIXME slow, needs work """ def __init__( self, params: ParamsT, lr: float = 0.001, momentum: float = 0.9, weight_decay: float = 0.0, preconditioner_update_probability: Optional[Union[Callable, float]] = None, max_size_triangular: int = 2048, min_ndim_triangular: int = 2, memory_save_mode: Optional[str] = None, momentum_into_precond_update: bool = True, precond_lr: float = 0.1, precond_init_scale: float = 1.0, mu_dtype: Optional[torch.dtype] = None, precond_dtype: Optional[torch.dtype] = None, decoupled_decay: bool = False, flatten: bool = False, flatten_start_dim: int = 2, flatten_end_dim: int = -1, stochastic_weight_decay: bool = False, deterministic: bool = False, ): if not has_opt_einsum: warnings.warn("It is highly recommended to have 'opt_einsum' installed for this optimizer.") if not 0.0 <= lr: raise ValueError(f"Invalid learning rate: {lr}") if not 0.0 <= momentum < 1.0: raise ValueError(f"Invalid beta parameter: {momentum}") if not 0.0 <= weight_decay: raise ValueError(f"Invalid weight_decay value: {weight_decay}") defaults = dict( lr=lr, momentum=momentum, weight_decay=weight_decay, preconditioner_update_probability=preconditioner_update_probability, max_size_triangular=max_size_triangular, min_ndim_triangular=min_ndim_triangular, memory_save_mode=memory_save_mode, momentum_into_precond_update=momentum_into_precond_update, precond_lr=precond_lr, precond_init_scale=precond_init_scale, mu_dtype=mu_dtype, precond_dtype=precond_dtype, decoupled_decay=decoupled_decay, flatten=flatten, flatten_start_dim=flatten_start_dim, flatten_end_dim=flatten_end_dim, stochastic_weight_decay=stochastic_weight_decay, ) super(Kron, self).__init__(params, defaults) self._param_exprs = {} # cache for einsum expr self._tiny = torch.finfo(torch.bfloat16).tiny self.rng = random.Random(1337) self.deterministic = deterministic # make compile optional (for bwd compat) if has_dynamo: self._calc_A_and_conjB = torch.compile(_calc_A_and_conjB, fullgraph=True, dynamic=False) self._q_terms = torch.compile(_q_terms, fullgraph=True, dynamic=False) self._precond_grad = torch.compile(_precond_grad, fullgraph=True, dynamic=False) self._balance_Q = torch.compile(_balance_Q, fullgraph=True, dynamic=False) else: self._calc_A_and_conjB = _calc_A_and_conjB self._q_terms = _q_terms self._precond_grad = _precond_grad self._balance_Q = _balance_Q def __getstate__(self): _dict = super().__getstate__() _dict["rng"] = self.rng return _dict def state_dict(self) -> Dict[str, Any]: # Get the optimizer's state dict optimizer_state = super().state_dict() # Add the generator state optimizer_state['rng_state'] = self.rng.getstate() return optimizer_state def load_state_dict(self, state_dict: Dict[str, Any]) -> None: # Extract and remove the RNG state from the state dict rng_states = {} if 'rng_state' in state_dict: rng_states['rng_state'] = state_dict.pop('rng_state') # Load the optimizer state super().load_state_dict(state_dict) state_dict.update(rng_states) # add back # Restore the RNG state if it exists if 'rng_state' in rng_states: self.rng.setstate(rng_states['rng_state']) def __setstate__(self, state): super().__setstate__(state) self._param_exprs = {} @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() total_momentum_size = 0 total_momentum_mb = 0 total_precond_size = 0 total_precond_mb = 0 for group in self.param_groups: 
mu_dtype = group.get("mu_dtype") precond_dtype = group.get("precond_dtype", torch.float32) momentum_into_precond_update = group.get("momentum_into_precond_update", True) update_prob = group.get("preconditioner_update_probability", None) for p in group["params"]: if p.grad is None: continue grad = p.grad state = self.state[p] flattened = False if group['flatten']: grad = safe_flatten(grad, group["flatten_start_dim"], group["flatten_end_dim"]) flattened = True if len(state) == 0: state["step"] = 0 state["update_counter"] = 0 state["momentum_buffer"] = torch.zeros_like(grad, dtype=mu_dtype or grad.dtype) # init Q and einsum expressions on first step state["Q"], exprs = _init_Q_exprs( grad, group["precond_init_scale"], group["max_size_triangular"], group["min_ndim_triangular"], group["memory_save_mode"], dtype=precond_dtype, ) self._param_exprs[p] = exprs # Accumulate sizes for log momentum_size = state["momentum_buffer"].numel() momentum_mb = momentum_size * state["momentum_buffer"].element_size() / 2**20 total_momentum_size += momentum_size total_momentum_mb += momentum_mb precond_size = sum(q.numel() for q in state["Q"]) precond_mb = sum(q.numel() * q.element_size() for q in state["Q"]) / 2**20 total_precond_size += precond_size total_precond_mb += precond_mb elif p not in self._param_exprs: # init only the einsum expressions, called after state load, Q are loaded from state_dict exprs = _init_Q_exprs( grad, group["precond_init_scale"], group["max_size_triangular"], group["min_ndim_triangular"], group["memory_save_mode"], dtype=precond_dtype, init_q=False, ) self._param_exprs[p] = exprs else: # retrieve cached expressions exprs = self._param_exprs[p] # update preconditioners all together deterministically if update_prob is None: update_prob = precond_update_prob_schedule if callable(update_prob): update_prob = update_prob(state["step"]) state["update_counter"] += 1 do_update = state["update_counter"] >= 1 / update_prob if do_update: state["update_counter"] = 0 state["step"] += 1 # Update momentum buffer beta = group["momentum"] bias_correction = 1 - beta ** state["step"] momentum_buffer = state["momentum_buffer"] momentum_buffer.mul_(group["momentum"]).add_(grad, alpha=1 - group["momentum"]) # Restore momentum dtype if mu_dtype is not None: momentum_buffer.copy_(momentum_buffer.to(dtype=mu_dtype)) debiased_momentum = (momentum_buffer / bias_correction).to(dtype=precond_dtype) # Balance preconditioners roughly every 100 updates balance = self.rng.random() < 0.01 and do_update if grad.dim() > 1 and balance: self._balance_Q(state["Q"]) # Update preconditioner if do_update: exprA, exprGs, _ = exprs Q = state["Q"] if self.deterministic: torch_rng = torch.Generator(device=debiased_momentum.device) torch_rng.manual_seed(self.rng.randint(0, 2 ** 31)) else: torch_rng = None V = torch.randn( debiased_momentum.shape, generator=torch_rng, dtype=precond_dtype, device=debiased_momentum.device, ) G = debiased_momentum if momentum_into_precond_update else grad A, conjB = self._calc_A_and_conjB(exprA, G, Q, V) terms = self._q_terms(exprGs, A, conjB) for q, (term1, term2) in zip(Q, terms): tmp = term1 - term2 tmp *= group["precond_lr"] if q.dim() < 2: tmp *= q tmp /= (term1 + term2).norm(float("inf")) + self._tiny else: tmp = torch.triu(tmp) tmp /= _norm_lower_bound(term1 + term2) + self._tiny tmp @= q q.sub_(tmp) # Precondition gradients pre_grad = self._precond_grad( state["Q"], exprs, debiased_momentum, ).to(dtype=p.dtype) # RMS of pre_grad should be 1.0, so let's cap at 1.1 pre_grad.mul_(torch.clamp(1.1 
/ (pre_grad.square().mean().sqrt_() + 1e-8), max=1.0)) if flattened: pre_grad = pre_grad.view(p.shape) # Apply weight decay weight_decay = group["weight_decay"] if weight_decay != 0: if group["stochastic_weight_decay"]: weight_decay = 2 * self.rng.random() * weight_decay if group["decoupled_decay"]: p.mul_(1. - group["lr"] * weight_decay) else: pre_grad.add_(p, alpha=weight_decay) # Update parameters p.add_(pre_grad, alpha=-group["lr"]) if total_momentum_size > 0: _logger.info(f"PSGD Momentum buffer size: {total_momentum_size} elements, {total_momentum_mb:.2f} MB") _logger.info(f"PSGD Preconditioners size: {total_precond_size} elements, {total_precond_mb:.2f} MB") return loss def safe_flatten(tensor, start_dim=0, end_dim=-1): ndim = tensor.ndim # Convert negative end_dim to positive and clip to end end_dim = min(end_dim if end_dim >= 0 else ndim + end_dim, ndim - 1) # If tensor has fewer dims than start_dim or start > end, return tensor as is if ndim <= start_dim or start_dim > end_dim: return tensor # Now safe to flatten return tensor.flatten(start_dim, end_dim) def _init_Q_exprs( t, scale, max_size, min_ndim_triangular, memory_save_mode, dtype=None, init_q=True, ): """For a scalar or tensor t, we initialize its preconditioner Q and reusable einsum expressions for updating Q and preconditioning gradient. """ letters = string.ascii_lowercase + string.ascii_uppercase dtype = dtype if dtype is not None else t.dtype shape = t.shape Q = [] if len(shape) == 0: # scalar if init_q: Q.append(scale * torch.ones_like(t, dtype=dtype)) exprA = ",->" exprGs = [",->"] exprP = ",,->" else: # tensor if len(shape) > 13: raise ValueError(f"Got tensor with dim {len(t.shape)}; Einstein runs out of letters!") scale = scale ** (1 / len(shape)) if memory_save_mode is None: dim_diag = [False for _ in shape] elif memory_save_mode == "one_diag": rev_sorted_dims = np.argsort(shape)[::-1] dim_diag = [False for _ in shape] dim_diag[rev_sorted_dims[0]] = True elif memory_save_mode == "smart_one_diag": # addition proposed by Lucas Nestler rev_sorted_dims = np.argsort(shape)[::-1] sorted_shape = sorted(shape) dim_diag = [False for _ in shape] if len(shape) >= 2 and sorted_shape[-1] > sorted_shape[-2]: dim_diag[rev_sorted_dims[0]] = True elif memory_save_mode == "all_diag": dim_diag = [True for _ in shape] else: raise ValueError( f"Invalid memory_save_mode: {memory_save_mode}, must be one of [None, 'one_diag', 'all_diag']") piece1A, piece2A, piece3A = ([], "", "") exprGs = [] piece1P, piece2P, piece3P, piece4P = ([], [], "", "") for i, (size, dim_d) in enumerate(zip(shape, dim_diag)): if ( size == 1 or size > max_size or len(shape) < min_ndim_triangular or dim_d ): # use diagonal matrix as preconditioner for this dim if init_q: Q.append(scale * torch.ones(size, dtype=dtype, device=t.device)) piece1A.append(letters[i]) piece2A = piece2A + letters[i] piece3A = piece3A + letters[i] piece1 = "".join([letters[i + 13] if j == i else letters[j] for j in range(len(shape))]) subscripts = piece1 + "," + piece1 + "->" + letters[i + 13] exprGs.append(subscripts) piece1P.append(letters[i + 13]) piece2P.append(letters[i + 13]) piece3P = piece3P + letters[i + 13] piece4P = piece4P + letters[i + 13] else: # use triangular matrix as preconditioner for this dim if init_q: Q.append(scale * torch.eye(size, dtype=dtype, device=t.device)) piece1A.append(letters[i] + letters[i + 13]) piece2A = piece2A + letters[i + 13] piece3A = piece3A + letters[i] piece1 = "".join([letters[i + 13] if j == i else letters[j] for j in range(len(shape))]) piece2 
= "".join([letters[i + 26] if j == i else letters[j] for j in range(len(shape))]) subscripts = piece1 + "," + piece2 + "->" + letters[i + 13] + letters[i + 26] exprGs.append(subscripts) a, b, c = (letters[i], letters[i + 13], letters[i + 26]) piece1P.append(a + b) piece2P.append(a + c) piece3P = piece3P + c piece4P = piece4P + b exprA = ",".join(piece1A) + "," + piece2A + "->" + piece3A exprP = ",".join(piece1P) + "," + ",".join(piece2P) + "," + piece3P + "->" + piece4P exprGs = tuple(exprGs) if init_q: return [Q, (exprA, exprGs, exprP)] else: return exprA, exprGs, exprP def _lb(A, max_abs): A = A / max_abs aa = torch.real(A * A.conj()) value0, i = torch.max(torch.sum(aa, dim=0), 0) value1, j = torch.max(torch.sum(aa, dim=1), 0) if value0 > value1: x = A[:, i].conj() @ A return max_abs * torch.linalg.vector_norm((x / torch.linalg.vector_norm(x)) @ A.H) else: x = A @ A[j].conj() return max_abs * torch.linalg.vector_norm(A.H @ (x / torch.linalg.vector_norm(x))) def _norm_lower_bound(A): """Cheap lower bound for the spectral norm of A.""" max_abs = A.norm(float("inf")) return torch.where(max_abs > 0, _lb(A, max_abs), max_abs) def _solve_triangular_right(X, A): """X @ inv(A)""" orig_dtype = X.dtype X = X.to(dtype=torch.float32) A = A.to(dtype=torch.float32) out = torch.linalg.solve_triangular(A, X.reshape(-1, X.size(-1)), upper=True, left=False).reshape_as(X) return out.to(dtype=orig_dtype) def _balance_Q(Q_in): norms = torch.stack([q.norm(float("inf")) for q in Q_in]) geometric_mean = norms.prod() ** (1 / len(Q_in)) norms = geometric_mean / norms for i, q in enumerate(Q_in): q.mul_(norms[i]) def _precond_grad(Q, exprs, G): """Precondition gradient G with preconditioner Q.""" return torch.einsum(exprs[-1], *[q.conj() for q in Q], *Q, G) def _calc_A_and_conjB(exprA, G, Q, V): A = torch.einsum(exprA, *Q, G) order = G.dim() p = tuple(range(order)) conjB = torch.permute(V.conj(), p[1:] + p[:1]) for i, q in enumerate(Q): conjB = conjB / q if q.dim() < 2 else _solve_triangular_right(conjB, q) if i < order - 1: conjB = torch.transpose(conjB, i, order - 1) return A, conjB def _q_terms(exprGs, A, conjB): terms = [] for exprG in exprGs: term1 = torch.einsum(exprG, A, A.conj()) term2 = torch.einsum(exprG, conjB.conj(), conjB) terms.append((term1, term2)) return terms
pytorch-image-models/timm/optim/kron.py/0
{ "file_path": "pytorch-image-models/timm/optim/kron.py", "repo_id": "pytorch-image-models", "token_count": 10686 }
""" Batch size decay and retry helpers. Copyright 2022 Ross Wightman """ import math def decay_batch_step(batch_size, num_intra_steps=2, no_odd=False): """ power of two batch-size decay with intra steps Decay by stepping between powers of 2: * determine power-of-2 floor of current batch size (base batch size) * divide above value by num_intra_steps to determine step size * floor batch_size to nearest multiple of step_size (from base batch size) Examples: num_steps == 4 --> 64, 56, 48, 40, 32, 28, 24, 20, 16, 14, 12, 10, 8, 7, 6, 5, 4, 3, 2, 1 num_steps (no_odd=True) == 4 --> 64, 56, 48, 40, 32, 28, 24, 20, 16, 14, 12, 10, 8, 6, 4, 2 num_steps == 2 --> 64, 48, 32, 24, 16, 12, 8, 6, 4, 3, 2, 1 num_steps == 1 --> 64, 32, 16, 8, 4, 2, 1 """ if batch_size <= 1: # return 0 for stopping value so easy to use in loop return 0 base_batch_size = int(2 ** (math.log(batch_size - 1) // math.log(2))) step_size = max(base_batch_size // num_intra_steps, 1) batch_size = base_batch_size + ((batch_size - base_batch_size - 1) // step_size) * step_size if no_odd and batch_size % 2: batch_size -= 1 return batch_size def check_batch_size_retry(error_str): """ check failure error string for conditions where batch decay retry should not be attempted """ error_str = error_str.lower() if 'required rank' in error_str: # Errors involving phrase 'required rank' typically happen when a conv is used that's # not compatible with channels_last memory format. return False if 'illegal' in error_str: # 'Illegal memory access' errors in CUDA typically leave process in unusable state return False return True
pytorch-image-models/timm/utils/decay_batch.py/0
{ "file_path": "pytorch-image-models/timm/utils/decay_batch.py", "repo_id": "pytorch-image-models", "token_count": 656 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Agentic RAG [[open-in-colab]] Retrieval-Augmented-Generation (RAG) is “using an LLM to answer a user query, but basing the answer on information retrieved from a knowledge base”. It has many advantages over using a vanilla or fine-tuned LLM: to name a few, it allows to ground the answer on true facts and reduce confabulations, it allows to provide the LLM with domain-specific knowledge, and it allows fine-grained control of access to information from the knowledge base. But vanilla RAG has limitations, most importantly these two: - It performs only one retrieval step: if the results are bad, the generation in turn will be bad. - Semantic similarity is computed with the user query as a reference, which might be suboptimal: for instance, the user query will often be a question and the document containing the true answer will be in affirmative voice, so its similarity score will be downgraded compared to other source documents in the interrogative form, leading to a risk of missing the relevant information. We can alleviate these problems by making a RAG agent: very simply, an agent armed with a retriever tool! This agent will: ✅ Formulate the query itself and ✅ Critique to re-retrieve if needed. So it should naively recover some advanced RAG techniques! - Instead of directly using the user query as the reference in semantic search, the agent formulates itself a reference sentence that can be closer to the targeted documents, as in [HyDE](https://huggingface.co/papers/2212.10496). The agent can use the generated snippets and re-retrieve if needed, as in [Self-Query](https://docs.llamaindex.ai/en/stable/examples/evaluation/RetryQuery/). Let's build this system. 🛠️ Run the line below to install required dependencies: ```bash !pip install smolagents pandas langchain langchain-community sentence-transformers datasets python-dotenv rank_bm25 --upgrade -q ``` To call the HF Inference API, you will need a valid token as your environment variable `HF_TOKEN`. We use python-dotenv to load it. ```py from dotenv import load_dotenv load_dotenv() ``` We first load a knowledge base on which we want to perform RAG: this dataset is a compilation of the documentation pages for many Hugging Face libraries, stored as markdown. We will keep only the documentation for the `transformers` library. Then prepare the knowledge base by processing the dataset and storing it into a vector database to be used by the retriever. We use [LangChain](https://python.langchain.com/docs/introduction/) for its excellent vector database utilities. 
```py import datasets from langchain.docstore.document import Document from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.retrievers import BM25Retriever knowledge_base = datasets.load_dataset("m-ric/huggingface_doc", split="train") knowledge_base = knowledge_base.filter(lambda row: row["source"].startswith("huggingface/transformers")) source_docs = [ Document(page_content=doc["text"], metadata={"source": doc["source"].split("/")[1]}) for doc in knowledge_base ] text_splitter = RecursiveCharacterTextSplitter( chunk_size=500, chunk_overlap=50, add_start_index=True, strip_whitespace=True, separators=["\n\n", "\n", ".", " ", ""], ) docs_processed = text_splitter.split_documents(source_docs) ``` Now the documents are ready. So let’s build our agentic RAG system! 👉 We only need a RetrieverTool that our agent can leverage to retrieve information from the knowledge base. Since we need to add a vectordb as an attribute of the tool, we cannot simply use the simple tool constructor with a `@tool` decorator: so we will follow the advanced setup highlighted in the [tools tutorial](../tutorials/tools). ```py from smolagents import Tool class RetrieverTool(Tool): name = "retriever" description = "Uses semantic search to retrieve the parts of transformers documentation that could be most relevant to answer your query." inputs = { "query": { "type": "string", "description": "The query to perform. This should be semantically close to your target documents. Use the affirmative form rather than a question.", } } output_type = "string" def __init__(self, docs, **kwargs): super().__init__(**kwargs) self.retriever = BM25Retriever.from_documents( docs, k=10 ) def forward(self, query: str) -> str: assert isinstance(query, str), "Your search query must be a string" docs = self.retriever.invoke( query, ) return "\nRetrieved documents:\n" + "".join( [ f"\n\n===== Document {str(i)} =====\n" + doc.page_content for i, doc in enumerate(docs) ] ) retriever_tool = RetrieverTool(docs_processed) ``` We have used BM25, a classic retrieval method, because it's lightning fast to setup. To improve retrieval accuracy, you could use replace BM25 with semantic search using vector representations for documents: thus you can head to the [MTEB Leaderboard](https://huggingface.co/spaces/mteb/leaderboard) to select a good embedding model. Now it’s straightforward to create an agent that leverages this `retriever_tool`! The agent will need these arguments upon initialization: - `tools`: a list of tools that the agent will be able to call. - `model`: the LLM that powers the agent. Our `model` must be a callable that takes as input a list of messages and returns text. It also needs to accept a stop_sequences argument that indicates when to stop its generation. For convenience, we directly use the HfEngine class provided in the package to get a LLM engine that calls Hugging Face's Inference API. >[!NOTE] To use a specific model, pass it like this: `HfApiModel("meta-llama/Llama-3.3-70B-Instruct")`. The Inference API hosts models based on various criteria, and deployed models may be updated or replaced without prior notice. Learn more about it [here](https://huggingface.co/docs/api-inference/supported-models). 
```py
from smolagents import HfApiModel, CodeAgent

agent = CodeAgent(
    tools=[retriever_tool], model=HfApiModel(), max_steps=4, verbosity_level=2
)
```

Upon initialization, the CodeAgent is automatically given a default system prompt that tells the LLM engine to work step by step and to emit tool calls as code snippets; you can replace this prompt template with your own if needed.

Then, when its `.run()` method is launched, the agent takes care of calling the LLM engine and executing the tool calls, in a loop that ends only when the `final_answer` tool is called with the final answer as its argument.

```py
agent_output = agent.run("For a transformers model training, which is slower, the forward or the backward pass?")

print("Final output:")
print(agent_output)
```
smolagents/docs/source/en/examples/rag.md/0
{ "file_path": "smolagents/docs/source/en/examples/rag.md", "repo_id": "smolagents", "token_count": 2207 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->
# Orchestrate a multi-agent system 🤖🤝🤖

[[open-in-colab]]

In this notebook we will build a **multi-agent web browser: an agentic system in which several agents collaborate to solve problems using the web!**

It will be a simple hierarchy, using a `ManagedAgent` object to wrap the managed web search agent:

```
              +----------------+
              | Manager agent  |
              +----------------+
                       |
        _______________|______________
       |                              |
  Code interpreter   +--------------------------------+
       tool          |         Managed agent          |
                     |      +------------------+      |
                     |      | Web Search agent |      |
                     |      +------------------+      |
                     |         |            |         |
                     |  Web Search tool     |         |
                     |             Visit webpage tool |
                     +--------------------------------+
```

Let's set this system up. Run the line below to install the required dependencies:

```
!pip install markdownify duckduckgo-search smolagents --upgrade -q
```

Log in in order to call the HF Inference API:

```
from huggingface_hub import login

login()
```

⚡️ Our agent will be powered by [Qwen/Qwen2.5-Coder-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) through the `HfApiModel` class, which uses HF's Inference API: the Inference API makes it quick and easy to run any open-source model.

_Note:_ The Inference API hosts models based on various criteria, and deployed models may be updated or replaced without prior notice. Learn more about it [here](https://huggingface.co/docs/api-inference/supported-models).

```py
model_id = "Qwen/Qwen2.5-Coder-32B-Instruct"
```

## 🔍 Create a web search tool

For web browsing, we can already use the existing [`DuckDuckGoSearchTool`](https://github.com/huggingface/smolagents/blob/main/src/smolagents/default_tools.py#L151-L176) tool, which provides the equivalent of a Google search.

But then we will also need to be able to look at the pages found by the `DuckDuckGoSearchTool`. To do so, we could import the library's built-in `VisitWebpageTool`, but we will rebuild it to see how it is done.

So let's build our `VisitWebpageTool` tool from scratch using `markdownify`.

```py
import re

import requests
from markdownify import markdownify
from requests.exceptions import RequestException

from smolagents import tool


@tool
def visit_webpage(url: str) -> str:
    """Visits a webpage at the given URL and returns its content as a markdown string.

    Args:
        url: The URL of the webpage to visit.

    Returns:
        The content of the webpage converted to Markdown, or an error message if the request fails.
    """
    try:
        # Send a GET request to the URL
        response = requests.get(url)
        response.raise_for_status()  # Raise an exception for bad status codes

        # Convert the HTML content to Markdown
        markdown_content = markdownify(response.text).strip()

        # Remove multiple line breaks
        markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)

        return markdown_content

    except RequestException as e:
        return f"Error fetching the webpage: {str(e)}"
    except Exception as e:
        return f"An unexpected error occurred: {str(e)}"
```

Ok, now let's test our tool!

```py
print(visit_webpage("https://en.wikipedia.org/wiki/Hugging_Face")[:500])
```

## Build our multi-agent system 🤖🤝🤖

Now that we have all the tools, `search` and `visit_webpage`, we can use them to create the web agent.

Which configuration should we choose for this agent?
- Web browsing is a single-timeline task that does not require parallel tool calls, so JSON tool calling works well for it. We therefore choose a `ToolCallingAgent`.
- Also, since web searches sometimes require browsing many pages before finding the right answer, we prefer to increase `max_steps` to 10.

```py
from smolagents import (
    CodeAgent,
    ToolCallingAgent,
    HfApiModel,
    ManagedAgent,
    DuckDuckGoSearchTool,
    LiteLLMModel,
)

model = HfApiModel(model_id)

web_agent = ToolCallingAgent(
    tools=[DuckDuckGoSearchTool(), visit_webpage],
    model=model,
    max_steps=10,
)
```

Then we wrap this agent in a `ManagedAgent`, which will make it callable by its manager agent.

```py
managed_web_agent = ManagedAgent(
    agent=web_agent,
    name="search",
    description="Runs web searches for you. Give it your query as an argument.",
)
```

Finally we create a manager agent, and upon initialization we pass our managed agent to it in its `managed_agents` argument.

Since this agent is the one doing the planning and thinking, advanced reasoning will be beneficial, so a `CodeAgent` is the best choice.

Also, we want to ask a question that involves the current year and requires additional data computations: so let's add `additional_authorized_imports=["time", "numpy", "pandas"]`, in case the agent needs these packages.

```py
manager_agent = CodeAgent(
    tools=[],
    model=model,
    managed_agents=[managed_web_agent],
    additional_authorized_imports=["time", "numpy", "pandas"],
)
```

That's all! Now let's run our system! We pick a question that requires both calculation and research.

```py
answer = manager_agent.run("If LLM training continues to scale up at the current rhythm until 2030, what would be the electric power in GW required to power the biggest training runs by 2030? What would that correspond to, compared to some countries? Please provide a source for any numbers used.")
```

We get this report as the answer:
```
Based on current growth projections and energy consumption estimates, if LLM trainings continue to scale up at the
current rhythm until 2030:

1. The electric power required to power the biggest training runs by 2030 would be approximately 303.74 GW, which
translates to about 2,660,762 GWh/year.

2. Comparing this to countries' electricity consumption:
   - It would be equivalent to about 34% of China's total electricity consumption.
   - It would exceed the total electricity consumption of India (184%), Russia (267%), and Japan (291%).
   - It would be nearly 9 times the electricity consumption of countries like Italy or Mexico.

3. Source of numbers:
   - The initial estimate of 5 GW for future LLM training comes from AWS CEO Matt Garman.
   - The growth projection used a CAGR of 79.80% from market research by Springs.
   - Country electricity consumption data is from the U.S. Energy Information Administration, primarily for the year 2021.
```

It looks like we will need some sizeable power plants if the [scaling hypothesis](https://gwern.net/scaling-hypothesis) continues to hold true.

Our agents collaborated efficiently to solve the task! ✅

💡 You can easily extend this orchestration to more agents: one runs code execution, one does web search, one handles file loading...
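
As a minimal sketch of that extension (not part of the original notebook), the manager can simply be given several managed agents. The snippet below reuses the classes and the `visit_webpage` tool defined above; the second worker agent and its name are purely illustrative:

```py
# Hypothetical second worker agent that only reads pages, reusing visit_webpage.
webpage_reader_agent = ToolCallingAgent(
    tools=[visit_webpage],
    model=model,
    max_steps=10,
)

managed_reader_agent = ManagedAgent(
    agent=webpage_reader_agent,
    name="read_webpage",
    description="Visits a webpage at the given URL and returns its content. Give it a URL as an argument.",
)

# The manager just receives a longer list of managed agents.
manager_agent = CodeAgent(
    tools=[],
    model=model,
    managed_agents=[managed_web_agent, managed_reader_agent],
    additional_authorized_imports=["time", "numpy", "pandas"],
)
```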
smolagents/docs/source/hi/examples/multiagents.md/0
{ "file_path": "smolagents/docs/source/hi/examples/multiagents.md", "repo_id": "smolagents", "token_count": 5840 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->
# Agentic RAG

[[open-in-colab]]

Retrieval-Augmented Generation (RAG) is "using an LLM to answer a user query, but grounding the answer on information retrieved from a knowledge base". It has many advantages over using a vanilla or fine-tuned LLM: to name a few, it allows grounding the answer on true facts and reducing hallucinations; it allows providing the LLM with domain-specific knowledge; and it allows fine-grained control over access to the information in the knowledge base.

However, vanilla RAG has some limitations, and the following two are particularly notable:
- It performs only one retrieval step: if the results are bad, the generation will be bad as well.
- Semantic similarity is computed with the user query as a reference, which may not be optimal: for example, the user query is usually a question, while the document containing the true answer is usually phrased affirmatively, so its similarity score will be lower than that of other source documents phrased as questions, risking that the relevant information is missed.

We can alleviate these problems by building a RAG agent: very simply, an agent equipped with a retrieval tool! This agent will: ✅ formulate the query and retrieve by itself, ✅ re-retrieve if needed.

It will therefore be smarter than vanilla RAG, because it can formulate the query itself instead of using the user query directly as a reference. This way, it can get closer to the target documents and improve retrieval accuracy, as in [HyDE](https://huggingface.co/papers/2212.10496). This agent can also use the generated snippets and re-retrieve when needed, as in [Self-Query](https://docs.llamaindex.ai/en/stable/examples/evaluation/RetryQuery/).

Let's start building this system. 🛠️

Run the line below to install the required dependencies:

```bash
!pip install smolagents pandas langchain langchain-community sentence-transformers rank_bm25 --upgrade -q
```

You need a valid token set as the environment variable `HF_TOKEN` to call the HF Inference API. We use python-dotenv to load it.

```py
from dotenv import load_dotenv
load_dotenv()
```

We first load a knowledge base on which to perform RAG: this dataset is a compilation of the documentation pages of many Hugging Face libraries, stored as markdown. We will keep only the documentation of the `transformers` library. Then we prepare the knowledge base for the retriever by processing the dataset and storing it in a vector database. We will use [LangChain](https://python.langchain.com/docs/introduction/) to take advantage of its excellent vector database utilities.

```py
import datasets
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.retrievers import BM25Retriever

knowledge_base = datasets.load_dataset("m-ric/huggingface_doc", split="train")
knowledge_base = knowledge_base.filter(lambda row: row["source"].startswith("huggingface/transformers"))

source_docs = [
    Document(page_content=doc["text"], metadata={"source": doc["source"].split("/")[1]})
    for doc in knowledge_base
]

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,
    chunk_overlap=50,
    add_start_index=True,
    strip_whitespace=True,
    separators=["\n\n", "\n", ".", " ", ""],
)
docs_processed = text_splitter.split_documents(source_docs)
```

Now the documents are ready. Let's build our agentic RAG system!

👉 We only need a RetrieverTool that our agent can use to retrieve information from the knowledge base.

Since we need to add a vectordb as an attribute of the tool, we cannot simply use the simple tool constructor with the `@tool` decorator: so we will follow the advanced setup highlighted in the [tools tutorial](../tutorials/tools).

```py
from smolagents import Tool


class RetrieverTool(Tool):
    name = "retriever"
    description = "Uses semantic search to retrieve the parts of transformers documentation that could be most relevant to answer your query."
    inputs = {
        "query": {
            "type": "string",
            "description": "The query to perform. This should be semantically close to your target documents. Use the affirmative form rather than a question.",
        }
    }
    output_type = "string"

    def __init__(self, docs, **kwargs):
        super().__init__(**kwargs)
        self.retriever = BM25Retriever.from_documents(
            docs, k=10
        )

    def forward(self, query: str) -> str:
        assert isinstance(query, str), "Your search query must be a string"

        docs = self.retriever.invoke(
            query,
        )
        return "\nRetrieved documents:\n" + "".join(
            [
                f"\n\n===== Document {str(i)} =====\n" + doc.page_content
                for i, doc in enumerate(docs)
            ]
        )


retriever_tool = RetrieverTool(docs_processed)
```

BM25 is a classic retrieval method, chosen here because it is very fast to set up. To improve retrieval accuracy, you could replace BM25 with semantic search based on vector representations of the documents: head over to the [MTEB Leaderboard](https://huggingface.co/spaces/mteb/leaderboard) to pick a good embedding model.

Now that we have created a tool that can retrieve information from the knowledge base, we can easily create an agent that leverages this `retriever_tool`! The agent will be initialized with the following arguments:
- `tools`: the list of tools the agent will be able to call.
- `model`: the LLM that powers the agent.

Our `model` must be a callable that takes a list of messages as input and returns text. It also needs to accept a stop_sequences argument indicating when to stop generating. For convenience, we directly use the `HfApiModel` class provided in the package to get an LLM engine that calls Hugging Face's Inference API.

We will use [meta-llama/Llama-3.3-70B-Instruct](meta-llama/Llama-3.3-70B-Instruct) as the LLM engine, because:
- It has a long 128k context, which is useful for processing long source documents.
- It is always available for free on HF's Inference API!

_Note:_ The Inference API hosts models based on various criteria, and deployed models may be updated or replaced without prior notice. Learn more [here](https://huggingface.co/docs/api-inference/supported-models).

```py
from smolagents import HfApiModel, CodeAgent

agent = CodeAgent(
    tools=[retriever_tool], model=HfApiModel("meta-llama/Llama-3.3-70B-Instruct"), max_steps=4, verbose=True
)
```

When we initialize the CodeAgent, it automatically receives a default system prompt that tells the LLM engine to proceed step by step and to generate tool calls as code snippets, but you can replace this prompt template with your own as needed. Then, when its `.run()` method is called, the agent takes care of calling the LLM engine and executing the tool calls in a loop, until the `final_answer` tool is called with the final answer as its argument.

```py
agent_output = agent.run("For a transformers model training, which is slower, the forward or the backward pass?")

print("Final output:")
print(agent_output)
```
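
As a rough sketch of the semantic-search variant mentioned above (not part of the original notebook, and assuming `faiss-cpu` is installed on top of the listed dependencies; the embedding model name is an arbitrary example), the BM25 retriever inside `RetrieverTool.__init__` could be swapped for a vector-store retriever like this:

```py
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

# Hypothetical embedding model choice; pick any model you like from the MTEB leaderboard.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Build a FAISS index over the same processed documents.
vector_db = FAISS.from_documents(docs_processed, embeddings)
semantic_retriever = vector_db.as_retriever(search_kwargs={"k": 10})

# In RetrieverTool.__init__, `self.retriever = BM25Retriever.from_documents(docs, k=10)`
# could then be replaced by this retriever; `forward` stays unchanged because both
# retrievers expose the same `.invoke(query)` interface.
```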
smolagents/docs/source/zh/examples/rag.md/0
{ "file_path": "smolagents/docs/source/zh/examples/rag.md", "repo_id": "smolagents", "token_count": 4008 }
from typing import Optional import requests # from smolagents.agents import ToolCallingAgent from smolagents import CodeAgent, HfApiModel, tool # Choose which LLM engine to use! model = HfApiModel() # model = TransformersModel(model_id="meta-llama/Llama-3.2-2B-Instruct") # For anthropic: change model_id below to 'anthropic/claude-3-5-sonnet-20240620' # model = LiteLLMModel(model_id="gpt-4o") @tool def get_weather(location: str, celsius: Optional[bool] = False) -> str: """ Get the current weather at the given location using the WeatherStack API. Args: location: The location (city name). celsius: Whether to return the temperature in Celsius (default is False, which returns Fahrenheit). Returns: A string describing the current weather at the location. """ api_key = "your_api_key" # Replace with your API key from https://weatherstack.com/ units = "m" if celsius else "f" # 'm' for Celsius, 'f' for Fahrenheit url = f"http://api.weatherstack.com/current?access_key={api_key}&query={location}&units={units}" try: response = requests.get(url) response.raise_for_status() # Raise an exception for HTTP errors data = response.json() if data.get("error"): # Check if there's an error in the response return f"Error: {data['error'].get('info', 'Unable to fetch weather data.')}" weather = data["current"]["weather_descriptions"][0] temp = data["current"]["temperature"] temp_unit = "°C" if celsius else "°F" return f"The current weather in {location} is {weather} with a temperature of {temp} {temp_unit}." except requests.exceptions.RequestException as e: return f"Error fetching weather data: {str(e)}" @tool def convert_currency(amount: float, from_currency: str, to_currency: str) -> str: """ Converts a specified amount from one currency to another using the ExchangeRate-API. Args: amount: The amount of money to convert. from_currency: The currency code of the currency to convert from (e.g., 'USD'). to_currency: The currency code of the currency to convert to (e.g., 'EUR'). Returns: str: A string describing the converted amount in the target currency, or an error message if the conversion fails. Raises: requests.exceptions.RequestException: If there is an issue with the HTTP request to the ExchangeRate-API. """ api_key = "your_api_key" # Replace with your actual API key from https://www.exchangerate-api.com/ url = f"https://v6.exchangerate-api.com/v6/{api_key}/latest/{from_currency}" try: response = requests.get(url) response.raise_for_status() data = response.json() exchange_rate = data["conversion_rates"].get(to_currency) if not exchange_rate: return f"Error: Unable to find exchange rate for {from_currency} to {to_currency}." converted_amount = amount * exchange_rate return f"{amount} {from_currency} is equal to {converted_amount} {to_currency}." except requests.exceptions.RequestException as e: return f"Error fetching conversion data: {str(e)}" @tool def get_news_headlines() -> str: """ Fetches the top news headlines from the News API for the United States. This function makes a GET request to the News API to retrieve the top news headlines for the United States. It returns the titles and sources of the top 5 articles as a formatted string. If no articles are available, it returns a message indicating that no news is available. In case of a request error, it returns an error message. Returns: str: A string containing the top 5 news headlines and their sources, or an error message. 
""" api_key = "your_api_key" # Replace with your actual API key from https://newsapi.org/ url = f"https://newsapi.org/v2/top-headlines?country=us&apiKey={api_key}" try: response = requests.get(url) response.raise_for_status() data = response.json() articles = data["articles"] if not articles: return "No news available at the moment." headlines = [f"{article['title']} - {article['source']['name']}" for article in articles[:5]] return "\n".join(headlines) except requests.exceptions.RequestException as e: return f"Error fetching news data: {str(e)}" @tool def get_joke() -> str: """ Fetches a random joke from the JokeAPI. This function sends a GET request to the JokeAPI to retrieve a random joke. It handles both single jokes and two-part jokes (setup and delivery). If the request fails or the response does not contain a joke, an error message is returned. Returns: str: The joke as a string, or an error message if the joke could not be fetched. """ url = "https://v2.jokeapi.dev/joke/Any?type=single" try: response = requests.get(url) response.raise_for_status() data = response.json() if "joke" in data: return data["joke"] elif "setup" in data and "delivery" in data: return f"{data['setup']} - {data['delivery']}" else: return "Error: Unable to fetch joke." except requests.exceptions.RequestException as e: return f"Error fetching joke: {str(e)}" @tool def get_time_in_timezone(location: str) -> str: """ Fetches the current time for a given location using the World Time API. Args: location: The location for which to fetch the current time, formatted as 'Region/City'. Returns: str: A string indicating the current time in the specified location, or an error message if the request fails. Raises: requests.exceptions.RequestException: If there is an issue with the HTTP request. """ url = f"http://worldtimeapi.org/api/timezone/{location}.json" try: response = requests.get(url) response.raise_for_status() data = response.json() current_time = data["datetime"] return f"The current time in {location} is {current_time}." except requests.exceptions.RequestException as e: return f"Error fetching time data: {str(e)}" @tool def get_random_fact() -> str: """ Fetches a random fact from the "uselessfacts.jsph.pl" API. Returns: str: A string containing the random fact or an error message if the request fails. """ url = "https://uselessfacts.jsph.pl/random.json?language=en" try: response = requests.get(url) response.raise_for_status() data = response.json() return f"Random Fact: {data['text']}" except requests.exceptions.RequestException as e: return f"Error fetching random fact: {str(e)}" @tool def search_wikipedia(query: str) -> str: """ Fetches a summary of a Wikipedia page for a given query. Args: query: The search term to look up on Wikipedia. Returns: str: A summary of the Wikipedia page if successful, or an error message if the request fails. Raises: requests.exceptions.RequestException: If there is an issue with the HTTP request. 
""" url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{query}" try: response = requests.get(url) response.raise_for_status() data = response.json() title = data["title"] extract = data["extract"] return f"Summary for {title}: {extract}" except requests.exceptions.RequestException as e: return f"Error fetching Wikipedia data: {str(e)}" # If you want to use the ToolCallingAgent instead, uncomment the following lines as they both will work # agent = ToolCallingAgent( # tools=[ # convert_currency, # get_weather, # get_news_headlines, # get_joke, # get_random_fact, # search_wikipedia, # ], # model=model, # ) agent = CodeAgent( tools=[ convert_currency, get_weather, get_news_headlines, get_joke, get_random_fact, search_wikipedia, ], model=model, ) # Uncomment the line below to run the agent with a specific query agent.run("5000 dollars to Euros") # agent.run("What is the weather in New York?") # agent.run("Give me the top news headlines") # agent.run("Tell me a joke") # agent.run("Tell me a Random Fact") # agent.run("who is Elon Musk?")
smolagents/examples/multiple_tools.py/0
{ "file_path": "smolagents/examples/multiple_tools.py", "repo_id": "smolagents", "token_count": 3106 }
from sqlalchemy import (
    Column,
    Float,
    Integer,
    MetaData,
    String,
    Table,
    create_engine,
    insert,
    inspect,
    text,
)

engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()

# create the receipts SQL table
table_name = "receipts"
receipts = Table(
    table_name,
    metadata_obj,
    Column("receipt_id", Integer, primary_key=True),
    Column("customer_name", String(16), primary_key=True),
    Column("price", Float),
    Column("tip", Float),
)
metadata_obj.create_all(engine)

rows = [
    {"receipt_id": 1, "customer_name": "Alan Payne", "price": 12.06, "tip": 1.20},
    {"receipt_id": 2, "customer_name": "Alex Mason", "price": 23.86, "tip": 0.24},
    {"receipt_id": 3, "customer_name": "Woodrow Wilson", "price": 53.43, "tip": 5.43},
    {"receipt_id": 4, "customer_name": "Margaret James", "price": 21.11, "tip": 1.00},
]
for row in rows:
    stmt = insert(receipts).values(**row)
    with engine.begin() as connection:
        connection.execute(stmt)

inspector = inspect(engine)
columns_info = [(col["name"], col["type"]) for col in inspector.get_columns("receipts")]

table_description = "Columns:\n" + "\n".join([f"  - {name}: {col_type}" for name, col_type in columns_info])
print(table_description)

from smolagents import tool


@tool
def sql_engine(query: str) -> str:
    """
    Allows you to perform SQL queries on the table. Returns a string representation of the result.
    The table is named 'receipts'. Its description is as follows:
        Columns:
        - receipt_id: INTEGER
        - customer_name: VARCHAR(16)
        - price: FLOAT
        - tip: FLOAT

    Args:
        query: The query to perform. This should be correct SQL.
    """
    output = ""
    with engine.connect() as con:
        rows = con.execute(text(query))
        for row in rows:
            output += "\n" + str(row)
    return output


from smolagents import CodeAgent, HfApiModel

agent = CodeAgent(
    tools=[sql_engine],
    model=HfApiModel("meta-llama/Meta-Llama-3.1-8B-Instruct"),
)
agent.run("Can you give me the name of the client who got the most expensive receipt?")
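
# --- Optional sanity check (not part of the original example) ---
# The @tool-decorated sql_engine stays directly callable, which is handy for verifying
# the table contents before (or after) handing the tool to the agent; uncomment to use.
# print(sql_engine("SELECT customer_name, price FROM receipts ORDER BY price DESC"))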
smolagents/examples/text_to_sql.py/0
{ "file_path": "smolagents/examples/text_to_sql.py", "repo_id": "smolagents", "token_count": 858 }
import ast import builtins import inspect from typing import Set from .utils import BASE_BUILTIN_MODULES, get_source _BUILTIN_NAMES = set(vars(builtins)) class MethodChecker(ast.NodeVisitor): """ Checks that a method - only uses defined names - contains no local imports (e.g. numpy is ok but local_script is not) """ def __init__(self, class_attributes: Set[str], check_imports: bool = True): self.undefined_names = set() self.imports = {} self.from_imports = {} self.assigned_names = set() self.arg_names = set() self.class_attributes = class_attributes self.errors = [] self.check_imports = check_imports self.typing_names = {"Any"} def visit_arguments(self, node): """Collect function arguments""" self.arg_names = {arg.arg for arg in node.args} if node.kwarg: self.arg_names.add(node.kwarg.arg) if node.vararg: self.arg_names.add(node.vararg.arg) def visit_Import(self, node): for name in node.names: actual_name = name.asname or name.name self.imports[actual_name] = name.name def visit_ImportFrom(self, node): module = node.module or "" for name in node.names: actual_name = name.asname or name.name self.from_imports[actual_name] = (module, name.name) def visit_Assign(self, node): for target in node.targets: if isinstance(target, ast.Name): self.assigned_names.add(target.id) self.visit(node.value) def visit_With(self, node): """Track aliases in 'with' statements (the 'y' in 'with X as y')""" for item in node.items: if item.optional_vars: # This is the 'y' in 'with X as y' if isinstance(item.optional_vars, ast.Name): self.assigned_names.add(item.optional_vars.id) self.generic_visit(node) def visit_ExceptHandler(self, node): """Track exception aliases (the 'e' in 'except Exception as e')""" if node.name: # This is the 'e' in 'except Exception as e' self.assigned_names.add(node.name) self.generic_visit(node) def visit_AnnAssign(self, node): """Track annotated assignments.""" if isinstance(node.target, ast.Name): self.assigned_names.add(node.target.id) if node.value: self.visit(node.value) def visit_For(self, node): target = node.target if isinstance(target, ast.Name): self.assigned_names.add(target.id) elif isinstance(target, ast.Tuple): for elt in target.elts: if isinstance(elt, ast.Name): self.assigned_names.add(elt.id) self.generic_visit(node) def visit_Attribute(self, node): if not (isinstance(node.value, ast.Name) and node.value.id == "self"): self.generic_visit(node) def visit_Name(self, node): if isinstance(node.ctx, ast.Load): if not ( node.id in _BUILTIN_NAMES or node.id in BASE_BUILTIN_MODULES or node.id in self.arg_names or node.id == "self" or node.id in self.class_attributes or node.id in self.imports or node.id in self.from_imports or node.id in self.assigned_names or node.id in self.typing_names ): self.errors.append(f"Name '{node.id}' is undefined.") def visit_Call(self, node): if isinstance(node.func, ast.Name): if not ( node.func.id in _BUILTIN_NAMES or node.func.id in BASE_BUILTIN_MODULES or node.func.id in self.arg_names or node.func.id == "self" or node.func.id in self.class_attributes or node.func.id in self.imports or node.func.id in self.from_imports or node.func.id in self.assigned_names ): self.errors.append(f"Name '{node.func.id}' is undefined.") self.generic_visit(node) def validate_tool_attributes(cls, check_imports: bool = True) -> None: """ Validates that a Tool class follows the proper patterns: 0. __init__ takes no argument (args chosen at init are not traceable so we cannot rebuild the source code for them, make them class attributes!). 1. 
About the class: - Class attributes should only be strings or dicts - Class attributes cannot be complex attributes 2. About all class methods: - Imports must be from packages, not local files - All methods must be self-contained Raises all errors encountered, if no error returns None. """ errors = [] source = get_source(cls) tree = ast.parse(source) if not isinstance(tree.body[0], ast.ClassDef): raise ValueError("Source code must define a class") # Check that __init__ method takes no arguments if not cls.__init__.__qualname__ == "Tool.__init__": sig = inspect.signature(cls.__init__) non_self_params = list([arg_name for arg_name in sig.parameters.keys() if arg_name != "self"]) if len(non_self_params) > 0: errors.append( f"This tool has additional args specified in __init__(self): {non_self_params}. Make sure it does not, all values should be hardcoded!" ) class_node = tree.body[0] class ClassLevelChecker(ast.NodeVisitor): def __init__(self): self.imported_names = set() self.complex_attributes = set() self.class_attributes = set() self.in_method = False def visit_FunctionDef(self, node): old_context = self.in_method self.in_method = True self.generic_visit(node) self.in_method = old_context def visit_Assign(self, node): if self.in_method: return # Track class attributes for target in node.targets: if isinstance(target, ast.Name): self.class_attributes.add(target.id) # Check if the assignment is more complex than simple literals if not all( isinstance(val, (ast.Str, ast.Num, ast.Constant, ast.Dict, ast.List, ast.Set)) for val in ast.walk(node.value) ): for target in node.targets: if isinstance(target, ast.Name): self.complex_attributes.add(target.id) class_level_checker = ClassLevelChecker() class_level_checker.visit(class_node) if class_level_checker.complex_attributes: errors.append( f"Complex attributes should be defined in __init__, not as class attributes: " f"{', '.join(class_level_checker.complex_attributes)}" ) # Run checks on all methods for node in class_node.body: if isinstance(node, ast.FunctionDef): method_checker = MethodChecker(class_level_checker.class_attributes, check_imports=check_imports) method_checker.visit(node) errors += [f"- {node.name}: {error}" for error in method_checker.errors] if errors: raise ValueError("Tool validation failed:\n" + "\n".join(errors)) return
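

# --- Hedged usage sketch (not part of this module) ---
# `validate_tool_attributes` raises a ValueError listing every violation, and returns None
# when the class is clean. In a separate script, validating a custom Tool subclass could
# look roughly like the commented example below; it assumes the class-attribute conventions
# used by smolagents tools (simple string/dict class attributes plus a self-contained
# `forward`), and the tool itself is made up for illustration:
#
#     from smolagents import Tool
#     from smolagents.tool_validation import validate_tool_attributes
#
#     class GreetingTool(Tool):
#         name = "greeting"
#         description = "Returns a short greeting for the given person."
#         inputs = {"person": {"type": "string", "description": "Who to greet."}}
#         output_type = "string"
#
#         def forward(self, person: str) -> str:
#             # Uses only its own argument and builtins, so MethodChecker finds no undefined names.
#             return f"Hello, {person}!"
#
#     # Raises ValueError listing all violations; returns None when the class passes.
#     validate_tool_attributes(GreetingTool)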
smolagents/src/smolagents/tool_validation.py/0
{ "file_path": "smolagents/src/smolagents/tool_validation.py", "repo_id": "smolagents", "token_count": 3416 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from pathlib import Path from typing import Optional from unittest.mock import MagicMock, patch import pytest from transformers.testing_utils import get_tests_dir from smolagents import ChatMessage, HfApiModel, TransformersModel, models, tool from smolagents.models import MessageRole, get_clean_message_list, parse_json_if_needed class ModelTests(unittest.TestCase): def test_get_json_schema_has_nullable_args(self): @tool def get_weather(location: str, celsius: Optional[bool] = False) -> str: """ Get weather in the next days at given location. Secretly this tool does not care about the location, it hates the weather everywhere. Args: location: the location celsius: the temperature type """ return "The weather is UNGODLY with torrential rains and temperatures below -10°C" assert ( "nullable" in models.get_tool_json_schema(get_weather)["function"]["parameters"]["properties"]["celsius"] ) def test_chatmessage_has_model_dumps_json(self): message = ChatMessage("user", [{"type": "text", "text": "Hello!"}]) data = json.loads(message.model_dump_json()) assert data["content"] == [{"type": "text", "text": "Hello!"}] def test_get_hfapi_message_no_tool(self): model = HfApiModel(max_tokens=10) messages = [{"role": "user", "content": [{"type": "text", "text": "Hello!"}]}] model(messages, stop_sequences=["great"]) @pytest.mark.skipif(not os.getenv("RUN_ALL"), reason="RUN_ALL environment variable not set") def test_get_hfapi_message_no_tool_external_provider(self): model = HfApiModel(model="Qwen/Qwen2.5-Coder-32B-Instruct", provider="together", max_tokens=10) messages = [{"role": "user", "content": [{"type": "text", "text": "Hello!"}]}] model(messages, stop_sequences=["great"]) def test_transformers_message_no_tool(self): model = TransformersModel( model_id="HuggingFaceTB/SmolLM2-135M-Instruct", max_new_tokens=5, device_map="auto", do_sample=False, ) messages = [{"role": "user", "content": [{"type": "text", "text": "Hello!"}]}] output = model(messages, stop_sequences=["great"]).content assert output == "assistant\nHello" def test_transformers_message_vl_no_tool(self): from PIL import Image img = Image.open(Path(get_tests_dir("fixtures")) / "000000039769.png") model = TransformersModel( model_id="llava-hf/llava-interleave-qwen-0.5b-hf", max_new_tokens=5, device_map="auto", do_sample=False, ) messages = [{"role": "user", "content": [{"type": "text", "text": "Hello!"}, {"type": "image", "image": img}]}] output = model(messages, stop_sequences=["great"]).content assert output == "Hello! 
How can" def test_parse_json_if_needed(self): args = "abc" parsed_args = parse_json_if_needed(args) assert parsed_args == "abc" args = '{"a": 3}' parsed_args = parse_json_if_needed(args) assert parsed_args == {"a": 3} args = "3" parsed_args = parse_json_if_needed(args) assert parsed_args == 3 args = 3 parsed_args = parse_json_if_needed(args) assert parsed_args == 3 class TestHfApiModel: def test_call_with_custom_role_conversions(self): custom_role_conversions = {MessageRole.USER: MessageRole.SYSTEM} model = HfApiModel(model_id="test-model", custom_role_conversions=custom_role_conversions) model.client = MagicMock() messages = [{"role": "user", "content": "Test message"}] _ = model(messages) # Verify that the role conversion was applied assert model.client.chat_completion.call_args.kwargs["messages"][0]["role"] == "system", ( "role conversion should be applied" ) def test_get_clean_message_list_basic(): messages = [ {"role": "user", "content": [{"type": "text", "text": "Hello!"}]}, {"role": "assistant", "content": [{"type": "text", "text": "Hi there!"}]}, ] result = get_clean_message_list(messages) assert len(result) == 2 assert result[0]["role"] == "user" assert result[0]["content"][0]["text"] == "Hello!" assert result[1]["role"] == "assistant" assert result[1]["content"][0]["text"] == "Hi there!" def test_get_clean_message_list_role_conversions(): messages = [ {"role": "tool-call", "content": [{"type": "text", "text": "Calling tool..."}]}, {"role": "tool-response", "content": [{"type": "text", "text": "Tool response"}]}, ] result = get_clean_message_list(messages, role_conversions={"tool-call": "assistant", "tool-response": "user"}) assert len(result) == 2 assert result[0]["role"] == "assistant" assert result[0]["content"][0]["text"] == "Calling tool..." assert result[1]["role"] == "user" assert result[1]["content"][0]["text"] == "Tool response" @pytest.mark.parametrize( "convert_images_to_image_urls, expected_clean_message", [ ( False, { "role": "user", "content": [ {"type": "image", "image": "encoded_image"}, {"type": "image", "image": "second_encoded_image"}, ], }, ), ( True, { "role": "user", "content": [ {"type": "image_url", "image_url": {"url": "data:image/png;base64,encoded_image"}}, {"type": "image_url", "image_url": {"url": "data:image/png;base64,second_encoded_image"}}, ], }, ), ], ) def test_get_clean_message_list_image_encoding(convert_images_to_image_urls, expected_clean_message): messages = [ { "role": "user", "content": [{"type": "image", "image": b"image_data"}, {"type": "image", "image": b"second_image_data"}], } ] with patch("smolagents.models.encode_image_base64") as mock_encode: mock_encode.side_effect = ["encoded_image", "second_encoded_image"] result = get_clean_message_list(messages, convert_images_to_image_urls=convert_images_to_image_urls) mock_encode.assert_any_call(b"image_data") mock_encode.assert_any_call(b"second_image_data") assert len(result) == 1 assert result[0] == expected_clean_message def test_get_clean_message_list_flatten_messages_as_text(): messages = [ {"role": "user", "content": [{"type": "text", "text": "Hello!"}]}, {"role": "user", "content": [{"type": "text", "text": "How are you?"}]}, ] result = get_clean_message_list(messages, flatten_messages_as_text=True) assert len(result) == 1 assert result[0]["role"] == "user" assert result[0]["content"] == "Hello!How are you?"
smolagents/tests/test_models.py/0
{ "file_path": "smolagents/tests/test_models.py", "repo_id": "smolagents", "token_count": 3273 }
ARG PLATFORM=xpu FROM lukemathwalker/cargo-chef:latest-rust-1.84.0 AS chef WORKDIR /usr/src ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse FROM chef AS planner COPY Cargo.lock Cargo.lock COPY Cargo.toml Cargo.toml COPY rust-toolchain.toml rust-toolchain.toml COPY proto proto COPY benchmark benchmark COPY router router COPY backends backends COPY launcher launcher RUN cargo chef prepare --recipe-path recipe.json FROM chef AS builder RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ python3.11-dev RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \ curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \ unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \ unzip -o $PROTOC_ZIP -d /usr/local 'include/*' && \ rm -f $PROTOC_ZIP COPY --from=planner /usr/src/recipe.json recipe.json RUN cargo chef cook --profile release-opt --recipe-path recipe.json ARG GIT_SHA ARG DOCKER_LABEL COPY Cargo.lock Cargo.lock COPY Cargo.toml Cargo.toml COPY rust-toolchain.toml rust-toolchain.toml COPY proto proto COPY benchmark benchmark COPY router router COPY backends backends COPY launcher launcher RUN cargo build --profile release-opt --frozen # Text Generation Inference base image for Intel FROM intel/oneapi-basekit:2024.2.1-0-devel-ubuntu22.04 AS xpu USER root ARG MAMBA_VERSION=23.1.0-1 ARG PYTHON_VERSION='3.11.10' # Automatically set by buildx ARG TARGETPLATFORM ENV PATH=/opt/conda/bin:$PATH # TGI seem to require libssl.so.1.1 instead of libssl.so.3 so we can't use ubuntu 22.04. Ubuntu 20.04 has python==3.8, and TGI requires python>=3.9, hence the need for miniconda. # Install mamba # translating Docker's TARGETPLATFORM into mamba arches RUN case ${TARGETPLATFORM} in \ "linux/arm64") MAMBA_ARCH=aarch64 ;; \ *) MAMBA_ARCH=x86_64 ;; \ esac && \ curl -fsSL -v -o ~/mambaforge.sh -O "https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-${MAMBA_ARCH}.sh" RUN chmod +x ~/mambaforge.sh && \ bash ~/mambaforge.sh -b -p /opt/conda && \ rm ~/mambaforge.sh RUN case ${TARGETPLATFORM} in \ "linux/arm64") exit 1 ;; \ *) /opt/conda/bin/conda update -y conda && \ /opt/conda/bin/conda install -y "python=${PYTHON_VERSION}" ;; \ esac && \ /opt/conda/bin/conda clean -ya # libssl.so.1.1 is not installed on Ubuntu 22.04 by default, install it RUN wget http://nz2.archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_amd64.deb && \ dpkg -i ./libssl1.1_1.1.1f-1ubuntu2_amd64.deb RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --dearmor | tee /usr/share/keyrings/intel-graphics.gpg > /dev/null RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \ | gpg --dearmor | tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null && echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | tee /etc/apt/sources.list.d/oneAPI.list RUN echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/intel-for-pytorch-gpu-dev all main" > /tmp/intel-for-pytorch-gpu-dev.list RUN mv /tmp/intel-for-pytorch-gpu-dev.list /etc/apt/sources.list.d RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt install -y xpu-smi cmake ninja-build pciutils intel-pti-dev-0.9 # Text Generation Inference base env ENV HF_HOME=/data \ HF_HUB_ENABLE_HF_TRANSFER=1 \ PORT=80 WORKDIR /usr/src RUN pip install 
https://intel-optimized-pytorch.s3.cn-north-1.amazonaws.com.cn/ipex_dev/xpu/torch-2.5.0a0%2Bgite84e33f-cp311-cp311-linux_x86_64.whl --no-cache-dir RUN pip install https://intel-optimized-pytorch.s3.cn-north-1.amazonaws.com.cn/ipex_dev/xpu/torchaudio-2.5.0a0%2B56bc006-cp311-cp311-linux_x86_64.whl --no-cache-dir RUN pip install https://intel-optimized-pytorch.s3.cn-north-1.amazonaws.com.cn/ipex_dev/xpu/torchvision-0.20.0a0%2B8e8a208-cp311-cp311-linux_x86_64.whl --no-cache-dir RUN pip install https://intel-optimized-pytorch.s3.cn-north-1.amazonaws.com.cn/ipex_dev/xpu/oneccl_bind_pt-2.5.0%2Bxpu-cp311-cp311-linux_x86_64.whl --no-cache-dir RUN pip install triton-xpu==3.0.0b2 --no-cache-dir # Install server COPY proto proto COPY server server COPY server/Makefile server/Makefile ENV UV_SYSTEM_PYTHON=1 RUN cd server && \ make gen-server && \ pip install -U pip uv && \ uv pip install -e ".[accelerate, compressed-tensors, peft, outlines]" --no-cache-dir ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/intel/oneapi/pti/0.9/lib:/opt/conda/lib ENV CCL_ZE_IPC_EXCHANGE=sockets #ENV TORCH_LLM_ALLREDUCE=1 #ENV CCL_TOPO_FABRIC_VERTEX_CONNECTION_CHECK=0 ENV TORCH_DEVICE_BACKEND_AUTOLOAD=0 RUN git clone https://github.com/intel/intel-extension-for-pytorch && cd intel-extension-for-pytorch && git checkout 1ccf72b2d11cd00b47aef6d6cd054c088aa6f083 RUN cd intel-extension-for-pytorch && git submodule update --init --recursive && USE_AOT_DEVLIST='pvc,ats-m150' BUILD_SEPARATE_OPS=OFF BUILD_WITH_CPU=OFF USE_XETLA=ON python setup.py install && rm -rf /usr/src/intel-extension-for-pytorch # Install benchmarker COPY --from=builder /usr/src/target/release-opt/text-generation-benchmark /usr/local/bin/text-generation-benchmark # Install router COPY --from=builder /usr/src/target/release-opt/text-generation-router /usr/local/bin/text-generation-router # Install launcher COPY --from=builder /usr/src/target/release-opt/text-generation-launcher /usr/local/bin/text-generation-launcher # Text Generation Inference base image for Intel-cpu FROM ubuntu:22.04 AS cpu RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ curl \ ca-certificates \ make \ g++-12 \ gcc-12 \ git \ wget \ cmake \ libnuma-dev RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-12 12 RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12 RUN update-alternatives --install /usr/bin/cc cc /usr/bin/gcc 30 RUN update-alternatives --set cc /usr/bin/gcc RUN update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++ 30 RUN update-alternatives --set c++ /usr/bin/g++ ENV HUGGINGFACE_HUB_CACHE=/data \ HF_HUB_ENABLE_HF_TRANSFER=1 \ PORT=80 ARG MAMBA_VERSION=23.1.0-1 ARG PYTHON_VERSION='3.11.10' # Automatically set by buildx ARG TARGETPLATFORM ENV PATH /opt/conda/bin:$PATH # TGI seem to require libssl.so.1.1 instead of libssl.so.3 so we can't use ubuntu 22.04. Ubuntu 20.04 has python==3.8, and TGI requires python>=3.9, hence the need for miniconda. 
# Install mamba # translating Docker's TARGETPLATFORM into mamba arches RUN case ${TARGETPLATFORM} in \ "linux/arm64") MAMBA_ARCH=aarch64 ;; \ *) MAMBA_ARCH=x86_64 ;; \ esac && \ curl -fsSL -v -o ~/mambaforge.sh -O "https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-${MAMBA_ARCH}.sh" RUN chmod +x ~/mambaforge.sh && \ bash ~/mambaforge.sh -b -p /opt/conda && \ rm ~/mambaforge.sh RUN case ${TARGETPLATFORM} in \ "linux/arm64") exit 1 ;; \ *) /opt/conda/bin/conda update -y conda && \ /opt/conda/bin/conda install -y "python=${PYTHON_VERSION}" ;; \ esac && \ /opt/conda/bin/conda clean -ya RUN conda install -c conda-forge gperftools mkl RUN pip install https://download.pytorch.org/whl/nightly/cpu/torch-2.5.0.dev20240815%2Bcpu-cp311-cp311-linux_x86_64.whl RUN pip install https://download.pytorch.org/whl/nightly/cpu/torchvision-0.20.0.dev20240815%2Bcpu-cp311-cp311-linux_x86_64.whl RUN pip install https://download.pytorch.org/whl/nightly/cpu/torchaudio-2.4.0.dev20240815%2Bcpu-cp311-cp311-linux_x86_64.whl RUN pip install triton==3.1.0 py-libnuma WORKDIR /usr/src RUN git clone https://github.com/intel/intel-extension-for-pytorch && cd intel-extension-for-pytorch && git checkout b7b552baf64283b594665b8687430fe92990e497 RUN git clone https://github.com/intel/torch-ccl.git && cd torch-ccl && git checkout v2.4.0+cpu+rc0 RUN sed -i 's/VERSION_MINOR 6/VERSION_MINOR 5/' intel-extension-for-pytorch/version.txt RUN cd intel-extension-for-pytorch && git submodule sync && git submodule update --init --recursive && python setup.py install RUN cd torch-ccl && git submodule sync && git submodule update --init --recursive && pip install . ENV LD_PRELOAD=/opt/conda/lib/libtcmalloc.so ENV CCL_ROOT=/opt/conda/lib/python3.11/site-packages/oneccl_bindings_for_pytorch ENV I_MPI_ROOT=/opt/conda/lib/python3.11/site-packages/oneccl_bindings_for_pytorch ENV FI_PROVIDER_PATH=/opt/conda/lib/python3.11/site-packages/oneccl_bindings_for_pytorch/opt/mpi/libfabric/lib/prov:/usr/lib64/libfabric ENV LD_LIBRARY_PATH=/opt/conda/lib/python3.11/site-packages/oneccl_bindings_for_pytorch/opt/mpi/libfabric/lib:/opt/conda/lib/python3.11/site-packages/oneccl_bindings_for_pytorch/lib ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/opt/conda/lib/" # Install server COPY proto proto COPY server server COPY server/Makefile server/Makefile ENV UV_SYSTEM_PYTHON=1 RUN cd server && \ make gen-server && \ pip install -U pip uv && \ uv pip install -e ".[accelerate, compressed-tensors, peft, outlines]" --no-cache-dir # Install benchmarker COPY --from=builder /usr/src/target/release-opt/text-generation-benchmark /usr/local/bin/text-generation-benchmark # Install router COPY --from=builder /usr/src/target/release-opt/text-generation-router /usr/local/bin/text-generation-router # Install launcher COPY --from=builder /usr/src/target/release-opt/text-generation-launcher /usr/local/bin/text-generation-launcher FROM ${PLATFORM} AS final ENV ATTENTION=flashdecoding-ipex ENV PREFIX_CACHING=1 ENV PREFILL_CHUNKING=1 ENV CUDA_GRAPHS=0 ENTRYPOINT ["text-generation-launcher"] CMD ["--json-output"]
text-generation-inference/Dockerfile_intel/0
{ "file_path": "text-generation-inference/Dockerfile_intel", "repo_id": "text-generation-inference", "token_count": 4227 }
#[allow(clippy::derive_partial_eq_without_eq)] mod pb; mod client; mod sharded_client; pub use client::Client; pub use pb::generate::v3::{ input_chunk::Chunk, Batch, CachedBatch, FinishReason, GeneratedText, Generation, GrammarType, HealthResponse, Image, InfoResponse, Input, InputChunk, NextTokenChooserParameters, Request, StoppingCriteriaParameters, Tokens, }; pub use sharded_client::ShardedClient;
text-generation-inference/backends/client/src/v3/mod.rs/0
{ "file_path": "text-generation-inference/backends/client/src/v3/mod.rs", "repo_id": "text-generation-inference", "token_count": 142 }
#!/bin/bash set -ex TRT_VER_BASE="10.8.0" TRT_VER_FULL="${TRT_VER_BASE}.43" CUDA_VER="12.8" CUDNN_VER="9.7.0.66-1" NCCL_VER="2.25.1-1+cuda${CUDA_VER}" CUBLAS_VER="${CUDA_VER}.3.14-1" NVRTC_VER="${CUDA_VER}.61-1" for i in "$@"; do case $i in --TRT_VER=?*) TRT_VER="${i#*=}";; --CUDA_VER=?*) CUDA_VER="${i#*=}";; --CUDNN_VER=?*) CUDNN_VER="${i#*=}";; --NCCL_VER=?*) NCCL_VER="${i#*=}";; --CUBLAS_VER=?*) CUBLAS_VER="${i#*=}";; *) ;; esac shift done NVCC_VERSION_OUTPUT=$(nvcc --version) if [[ $(echo $NVCC_VERSION_OUTPUT | grep -oP "\d+\.\d+" | head -n 1) != ${CUDA_VER} ]]; then echo "The version of pre-installed CUDA is not equal to ${CUDA_VER}." exit 1 fi install_ubuntu_requirements() { apt-get update && apt-get install -y --no-install-recommends gnupg2 curl ca-certificates ARCH=$(uname -m) if [ "$ARCH" = "amd64" ];then ARCH="x86_64";fi if [ "$ARCH" = "aarch64" ];then ARCH="sbsa";fi curl -fsSLO https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/${ARCH}/cuda-keyring_1.1-1_all.deb dpkg -i cuda-keyring_1.1-1_all.deb rm /etc/apt/sources.list.d/cuda-ubuntu2404-x86_64.list apt-get update if [[ $(apt list --installed | grep libcudnn9) ]]; then apt-get remove --purge -y --allow-change-held-packages libcudnn9* fi if [[ $(apt list --installed | grep libnccl) ]]; then apt-get remove --purge -y --allow-change-held-packages libnccl* fi if [[ $(apt list --installed | grep libcublas) ]]; then apt-get remove --purge -y --allow-change-held-packages libcublas* fi if [[ $(apt list --installed | grep cuda-nvrtc-dev) ]]; then apt-get remove --purge -y --allow-change-held-packages cuda-nvrtc-dev* fi CUBLAS_CUDA_VERSION=$(echo $CUDA_VER | sed 's/\./-/g') apt-get install -y --no-install-recommends libcudnn9-cuda-12=${CUDNN_VER} libcudnn9-dev-cuda-12=${CUDNN_VER} apt-get install -y --no-install-recommends libnccl2=${NCCL_VER} libnccl-dev=${NCCL_VER} apt-get install -y --no-install-recommends libcublas-${CUBLAS_CUDA_VERSION}=${CUBLAS_VER} libcublas-dev-${CUBLAS_CUDA_VERSION}=${CUBLAS_VER} # NVRTC static library doesn't exist in NGC PyTorch container. 
NVRTC_CUDA_VERSION=$(echo $CUDA_VER | sed 's/\./-/g') apt-get install -y --no-install-recommends cuda-nvrtc-dev-${NVRTC_CUDA_VERSION}=${NVRTC_VER} apt-get clean rm -rf /var/lib/apt/lists/* } install_centos_requirements() { CUBLAS_CUDA_VERSION=$(echo $CUDA_VER | sed 's/\./-/g') yum -y update yum -y install epel-release yum remove -y libnccl* && yum -y install libnccl-${NCCL_VER} libnccl-devel-${NCCL_VER} yum remove -y libcublas* && yum -y install libcublas-${CUBLAS_CUDA_VERSION}-${CUBLAS_VER} libcublas-devel-${CUBLAS_CUDA_VERSION}-${CUBLAS_VER} yum clean all } install_tensorrt() { #PY_VERSION=$(python3 -c 'import sys; print(".".join(map(str, sys.version_info[0:2])))') #PARSED_PY_VERSION=$(echo "${PY_VERSION//./}") TRT_CUDA_VERSION="12.8" if [ -z "$RELEASE_URL_TRT" ];then ARCH=${TRT_TARGETARCH} if [ -z "$ARCH" ];then ARCH=$(uname -m);fi if [ "$ARCH" = "arm64" ];then ARCH="aarch64";fi if [ "$ARCH" = "amd64" ];then ARCH="x86_64";fi if [ "$ARCH" = "x86_64" ];then DIR_NAME="x64-agnostic"; else DIR_NAME=${ARCH};fi if [ "$ARCH" = "aarch64" ];then OS1="Ubuntu22_04" && OS2="Ubuntu-24.04" && OS="ubuntu-24.04"; else OS1="Linux" && OS2="Linux" && OS="linux";fi RELEASE_URL_TRT=https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/${TRT_VER_BASE}/tars/TensorRT-${TRT_VER_FULL}.${OS2}.${ARCH}-gnu.cuda-${TRT_CUDA_VERSION}.tar.gz fi wget --no-verbose ${RELEASE_URL_TRT} -O /tmp/TensorRT.tar tar -xf /tmp/TensorRT.tar -C /usr/local/ mv /usr/local/TensorRT-${TRT_VER_FULL} /usr/local/tensorrt # pip3 install /usr/local/tensorrt/python/tensorrt-*-cp${PARSED_PY_VERSION}-*.whl rm -rf /tmp/TensorRT.tar } # Install base packages depending on the base OS ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') case "$ID" in debian) install_ubuntu_requirements install_tensorrt ;; ubuntu) install_ubuntu_requirements install_tensorrt ;; centos) install_centos_requirements install_tensorrt ;; *) echo "Unable to determine OS..." exit 1 ;; esac
text-generation-inference/backends/trtllm/scripts/install_tensorrt.sh/0
{ "file_path": "text-generation-inference/backends/trtllm/scripts/install_tensorrt.sh", "repo_id": "text-generation-inference", "token_count": 2083 }
/// Inspired by https://github.com/hatoo/oha/blob/bb989ea3cd77727e7743e7daa60a19894bb5e901/src/monitor.rs use crate::generation::{Decode, Message, Prefill}; use ratatui::crossterm::event::{KeyCode, KeyEvent, KeyModifiers}; use ratatui::layout::{Alignment, Constraint, Direction, Layout}; use ratatui::style::{Color, Modifier, Style}; use ratatui::text::{Line, Span}; use ratatui::widgets::{ Axis, BarChart, Block, Borders, Chart, Dataset, Gauge, GraphType, Paragraph, Tabs, }; use ratatui::{symbols, Frame}; use text_generation_client::ClientError; use tokio::sync::mpsc; /// TUI powered App pub(crate) struct App { pub(crate) running: bool, pub(crate) data: Data, completed_runs: Vec<usize>, completed_batch: usize, current_batch: usize, current_tab: usize, touched_tab: bool, zoom: bool, is_error: bool, tokenizer_name: String, sequence_length: u32, decode_length: u32, n_run: usize, receiver: mpsc::Receiver<Result<Message, ClientError>>, } impl App { pub(crate) fn new( receiver: mpsc::Receiver<Result<Message, ClientError>>, tokenizer_name: String, sequence_length: u32, decode_length: u32, n_run: usize, batch_size: Vec<u32>, ) -> Self { let current_tab = 0; let completed_runs: Vec<usize> = (0..batch_size.len()).map(|_| 0).collect(); let completed_batch = 0; let current_batch = 0; let is_error = false; let data = Data::new(n_run, batch_size); Self { running: true, data, completed_runs, completed_batch, current_batch, current_tab, touched_tab: false, zoom: false, is_error, tokenizer_name, sequence_length, decode_length, n_run, receiver, } } /// Handle crossterm key events pub(crate) fn handle_key_event(&mut self, key_event: KeyEvent) { match key_event { // Increase and wrap tab KeyEvent { code: KeyCode::Right, .. } | KeyEvent { code: KeyCode::Tab, .. } => { self.touched_tab = true; self.current_tab = (self.current_tab + 1) % self.data.batch_size.len(); } // Decrease and wrap tab KeyEvent { code: KeyCode::Left, .. } => { self.touched_tab = true; if self.current_tab > 0 { self.current_tab -= 1; } else { self.current_tab = self.data.batch_size.len() - 1; } } // Zoom on throughput/latency fig KeyEvent { code: KeyCode::Char('+'), .. } => { self.zoom = true; } // Unzoom on throughput/latency fig KeyEvent { code: KeyCode::Char('-'), .. } => { self.zoom = false; } // Quit KeyEvent { code: KeyCode::Char('q'), .. } | KeyEvent { code: KeyCode::Char('c'), modifiers: KeyModifiers::CONTROL, .. 
} => { self.running = false; } _ => (), } } /// Get all pending messages from generation task pub(crate) fn tick(&mut self) { while let Ok(message) = self.receiver.try_recv() { match message { Ok(message) => match message { Message::Prefill(step) => self.data.push_prefill(step, self.current_batch), Message::Decode(step) => self.data.push_decode(step, self.current_batch), Message::EndRun => { self.completed_runs[self.current_batch] += 1; } Message::EndBatch => { self.data.end_batch(self.current_batch); self.completed_batch += 1; if self.current_batch < self.data.batch_size.len() - 1 { // Only go to next tab if the user never touched the tab keys if !self.touched_tab { self.current_tab += 1; } self.current_batch += 1; } } Message::Warmup => {} }, Err(_) => self.is_error = true, } } } /// Render frame pub fn render(&mut self, f: &mut Frame) { let batch_progress = (self.completed_batch as f64 / self.data.batch_size.len() as f64).clamp(0.0, 1.0); let run_progress = (self.completed_runs[self.current_batch] as f64 / self.n_run as f64).clamp(0.0, 1.0); // Vertical layout let row5 = Layout::default() .direction(Direction::Vertical) .constraints( [ Constraint::Length(1), Constraint::Length(3), Constraint::Length(3), Constraint::Length(13), Constraint::Min(10), ] .as_ref(), ) .split(f.area()); // Top row horizontal layout let top = Layout::default() .direction(Direction::Horizontal) .constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref()) .split(row5[2]); // Mid row horizontal layout let mid = Layout::default() .direction(Direction::Horizontal) .constraints( [ Constraint::Percentage(25), Constraint::Percentage(25), Constraint::Percentage(25), Constraint::Percentage(25), ] .as_ref(), ) .split(row5[3]); // Left mid row vertical layout let prefill_text = Layout::default() .direction(Direction::Vertical) .constraints([Constraint::Length(8), Constraint::Length(5)].as_ref()) .split(mid[0]); // Right mid row vertical layout let decode_text = Layout::default() .direction(Direction::Vertical) .constraints([Constraint::Length(8), Constraint::Length(5)].as_ref()) .split(mid[2]); let decode_text_latency = Layout::default() .direction(Direction::Horizontal) .constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref()) .split(decode_text[0]); // Bottom row horizontal layout let bottom = Layout::default() .direction(Direction::Horizontal) .constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref()) .split(row5[4]); // Title let title = Block::default() .borders(Borders::NONE) .title(format!( "Model: {} | Sequence Length: {} | Decode Length: {}", self.tokenizer_name, self.sequence_length, self.decode_length )) .style( Style::default() .add_modifier(Modifier::BOLD) .fg(Color::White), ); f.render_widget(title, row5[0]); // Helper let helper = Block::default() .borders(Borders::NONE) .title("<- | tab | ->: change batch tab | q / CTRL + c: quit | +/-: zoom") .title_alignment(Alignment::Right) .style(Style::default().fg(Color::White)); f.render_widget(helper, row5[0]); // Batch tabs let titles: Vec<Line> = self .data .batch_size .iter() .map(|b| { Line::from(vec![Span::styled( format!("Batch: {b}"), Style::default().fg(Color::White), )]) }) .collect(); let tabs = Tabs::new(titles) .block(Block::default().borders(Borders::ALL).title("Tabs")) .select(self.current_tab) .style(Style::default().fg(Color::LightCyan)) .highlight_style( Style::default() .add_modifier(Modifier::BOLD) .bg(Color::Black), ); f.render_widget(tabs, row5[1]); // Total progress bar let color = 
if self.is_error { Color::Red } else { Color::LightGreen }; let batch_gauge = progress_gauge( "Total Progress", format!("{} / {}", self.completed_batch, self.data.batch_size.len()), batch_progress, color, ); f.render_widget(batch_gauge, top[0]); // Batch progress Bar let color = if self.is_error { Color::Red } else { Color::LightBlue }; let run_gauge = progress_gauge( "Batch Progress", format!( "{} / {}", self.completed_runs[self.current_batch], self.n_run ), run_progress, color, ); f.render_widget(run_gauge, top[1]); // Prefill text infos let prefill_latency_block = latency_paragraph( &mut self.data.prefill_latencies[self.current_tab], "Prefill", ); let prefill_throughput_block = throughput_paragraph(&self.data.prefill_throughputs[self.current_tab], "Prefill"); f.render_widget(prefill_latency_block, prefill_text[0]); f.render_widget(prefill_throughput_block, prefill_text[1]); // Prefill latency histogram let histo_width = 7; let bins = if mid[1].width < 2 { 0 } else { (mid[1].width as usize - 2) / (histo_width + 1) } .max(2); let histo_data = latency_histogram_data(&self.data.prefill_latencies[self.current_tab], bins); let histo_data_str: Vec<(&str, u64)> = histo_data.iter().map(|(l, v)| (l.as_str(), *v)).collect(); let prefill_histogram = latency_histogram(&histo_data_str, "Prefill").bar_width(histo_width as u16); f.render_widget(prefill_histogram, mid[1]); // Decode text info let decode_latency_block = latency_paragraph( &mut self.data.decode_latencies[self.current_tab], "Decode Total", ); let decode_token_latency_block = latency_paragraph( &mut self.data.decode_token_latencies[self.current_tab], "Decode Token", ); let decode_throughput_block = throughput_paragraph(&self.data.decode_throughputs[self.current_tab], "Decode"); f.render_widget(decode_latency_block, decode_text_latency[0]); f.render_widget(decode_token_latency_block, decode_text_latency[1]); f.render_widget(decode_throughput_block, decode_text[1]); // Decode latency histogram let histo_data = latency_histogram_data(&self.data.decode_latencies[self.current_tab], bins); let histo_data_str: Vec<(&str, u64)> = histo_data.iter().map(|(l, v)| (l.as_str(), *v)).collect(); let decode_histogram = latency_histogram(&histo_data_str, "Decode").bar_width(histo_width as u16); f.render_widget(decode_histogram, mid[3]); // Prefill latency/throughput chart let prefill_latency_throughput_chart = latency_throughput_chart( &self.data.prefill_batch_latency_throughput, &self.data.batch_size, self.zoom, "Prefill", ); f.render_widget(prefill_latency_throughput_chart, bottom[0]); // Decode latency/throughput chart let decode_latency_throughput_chart = latency_throughput_chart( &self.data.decode_batch_latency_throughput, &self.data.batch_size, self.zoom, "Decode", ); f.render_widget(decode_latency_throughput_chart, bottom[1]); } } /// App internal data struct pub(crate) struct Data { pub(crate) batch_size: Vec<u32>, pub(crate) prefill_latencies: Vec<Vec<f64>>, pub(crate) prefill_throughputs: Vec<Vec<f64>>, pub(crate) decode_latencies: Vec<Vec<f64>>, pub(crate) decode_token_latencies: Vec<Vec<f64>>, pub(crate) decode_throughputs: Vec<Vec<f64>>, pub(crate) prefill_batch_latency_throughput: Vec<(f64, f64)>, pub(crate) decode_batch_latency_throughput: Vec<(f64, f64)>, } impl Data { fn new(n_run: usize, batch_size: Vec<u32>) -> Self { let prefill_latencies: Vec<Vec<f64>> = (0..batch_size.len()) .map(|_| Vec::with_capacity(n_run)) .collect(); let prefill_throughputs: Vec<Vec<f64>> = prefill_latencies.clone(); let decode_latencies: Vec<Vec<f64>> = 
prefill_latencies.clone(); let decode_token_latencies: Vec<Vec<f64>> = decode_latencies.clone(); let decode_throughputs: Vec<Vec<f64>> = prefill_throughputs.clone(); let prefill_batch_latency_throughput: Vec<(f64, f64)> = Vec::with_capacity(batch_size.len()); let decode_batch_latency_throughput: Vec<(f64, f64)> = prefill_batch_latency_throughput.clone(); Self { batch_size, prefill_latencies, prefill_throughputs, decode_latencies, decode_token_latencies, decode_throughputs, prefill_batch_latency_throughput, decode_batch_latency_throughput, } } fn push_prefill(&mut self, prefill: Prefill, batch_idx: usize) { let latency = prefill.latency.as_micros() as f64 / 1000.0; self.prefill_latencies[batch_idx].push(latency); self.prefill_throughputs[batch_idx].push(prefill.throughput); } fn push_decode(&mut self, decode: Decode, batch_idx: usize) { let latency = decode.latency.as_micros() as f64 / 1000.0; let token_latency = decode.token_latency.as_micros() as f64 / 1000.0; self.decode_latencies[batch_idx].push(latency); self.decode_token_latencies[batch_idx].push(token_latency); self.decode_throughputs[batch_idx].push(decode.throughput); } fn end_batch(&mut self, batch_idx: usize) { self.prefill_batch_latency_throughput.push(( self.prefill_latencies[batch_idx].iter().sum::<f64>() / self.prefill_latencies[batch_idx].len() as f64, self.prefill_throughputs[batch_idx].iter().sum::<f64>() / self.prefill_throughputs[batch_idx].len() as f64, )); self.decode_batch_latency_throughput.push(( self.decode_latencies[batch_idx].iter().sum::<f64>() / self.decode_latencies[batch_idx].len() as f64, self.decode_throughputs[batch_idx].iter().sum::<f64>() / self.decode_throughputs[batch_idx].len() as f64, )); } } /// Progress bar fn progress_gauge(title: &str, label: String, progress: f64, color: Color) -> Gauge { Gauge::default() .block(Block::default().title(title).borders(Borders::ALL)) .gauge_style(Style::default().fg(color)) .label(Span::raw(label)) .ratio(progress) } /// Throughput paragraph fn throughput_paragraph<'a>(throughput: &[f64], name: &'static str) -> Paragraph<'a> { // Throughput average/high/low texts let throughput_texts = statis_spans(throughput, "tokens/secs"); // Throughput block Paragraph::new(throughput_texts).block( Block::default() .title(Span::raw(format!("{name} Throughput"))) .borders(Borders::ALL), ) } /// Latency paragraph fn latency_paragraph<'a>(latency: &mut [f64], name: &'static str) -> Paragraph<'a> { // Latency average/high/low texts let mut latency_texts = statis_spans(latency, "ms"); // Sort latency for percentiles float_ord::sort(latency); let latency_percentiles = crate::utils::percentiles(latency, &[50, 90, 99]); // Latency p50/p90/p99 texts let colors = [Color::LightGreen, Color::LightYellow, Color::LightRed]; for (i, (name, value)) in latency_percentiles.iter().enumerate() { let span = Line::from(vec![Span::styled( format!("{name}: {value:.2} ms"), Style::default().fg(colors[i]), )]); latency_texts.push(span); } Paragraph::new(latency_texts).block( Block::default() .title(Span::raw(format!("{name} Latency"))) .borders(Borders::ALL), ) } /// Average/High/Low spans fn statis_spans<'a>(data: &[f64], unit: &'static str) -> Vec<Line<'a>> { vec![ Line::from(vec![Span::styled( format!( "Average: {:.2} {unit}", data.iter().sum::<f64>() / data.len() as f64 ), Style::default().fg(Color::LightBlue), )]), Line::from(vec![Span::styled( format!( "Lowest: {:.2} {unit}", data.iter() .min_by(|a, b| a.total_cmp(b)) .unwrap_or(&f64::NAN) ), Style::default().fg(Color::Reset), )]), 
Line::from(vec![Span::styled( format!( "Highest: {:.2} {unit}", data.iter() .max_by(|a, b| a.total_cmp(b)) .unwrap_or(&f64::NAN) ), Style::default().fg(Color::Reset), )]), ] } /// Latency histogram data fn latency_histogram_data(latency: &[f64], bins: usize) -> Vec<(String, u64)> { let histo_data: Vec<(String, u64)> = { let histo = crate::utils::histogram(latency, bins); histo .into_iter() .map(|(label, v)| (format!("{label:.2}"), v as u64)) .collect() }; histo_data } /// Latency Histogram fn latency_histogram<'a>( histo_data_str: &'a Vec<(&'a str, u64)>, name: &'static str, ) -> BarChart<'a> { BarChart::default() .block( Block::default() .title(format!("{name} latency histogram")) .style(Style::default().fg(Color::LightYellow).bg(Color::Reset)) .borders(Borders::ALL), ) .data(histo_data_str.as_slice()) } /// Latency/Throughput chart fn latency_throughput_chart<'a>( latency_throughput: &'a [(f64, f64)], batch_sizes: &'a [u32], zoom: bool, name: &'static str, ) -> Chart<'a> { let latency_iter = latency_throughput.iter().map(|(l, _)| l); let throughput_iter = latency_throughput.iter().map(|(_, t)| t); // Get extreme values let min_latency: f64 = *latency_iter .clone() .min_by(|a, b| a.total_cmp(b)) .unwrap_or(&f64::NAN); let max_latency: f64 = *latency_iter .max_by(|a, b| a.total_cmp(b)) .unwrap_or(&f64::NAN); let min_throughput: f64 = *throughput_iter .clone() .min_by(|a, b| a.total_cmp(b)) .unwrap_or(&f64::NAN); let max_throughput: f64 = *throughput_iter .max_by(|a, b| a.total_cmp(b)) .unwrap_or(&f64::NAN); // Char min max values let min_x = if zoom { ((min_latency - 0.05 * min_latency) / 100.0).floor() * 100.0 } else { 0.0 }; let max_x = ((max_latency + 0.05 * max_latency) / 100.0).ceil() * 100.0; let step_x = (max_x - min_x) / 4.0; // Chart min max values let min_y = if zoom { ((min_throughput - 0.05 * min_throughput) / 100.0).floor() * 100.0 } else { 0.0 }; let max_y = ((max_throughput + 0.05 * max_throughput) / 100.0).ceil() * 100.0; let step_y = (max_y - min_y) / 4.0; // Labels let mut x_labels = vec![Span::styled( format!("{min_x:.2}"), Style::default() .add_modifier(Modifier::BOLD) .fg(Color::Gray) .bg(Color::Reset), )]; for i in 0..3 { x_labels.push(Span::styled( format!("{:.2}", min_x + ((i + 1) as f64 * step_x)), Style::default().fg(Color::Gray).bg(Color::Reset), )); } x_labels.push(Span::styled( format!("{max_x:.2}"), Style::default() .add_modifier(Modifier::BOLD) .fg(Color::Gray) .bg(Color::Reset), )); // Labels let mut y_labels = vec![Span::styled( format!("{min_y:.2}"), Style::default() .add_modifier(Modifier::BOLD) .fg(Color::Gray) .bg(Color::Reset), )]; for i in 0..3 { y_labels.push(Span::styled( format!("{:.2}", min_y + ((i + 1) as f64 * step_y)), Style::default().fg(Color::Gray).bg(Color::Reset), )); } y_labels.push(Span::styled( format!("{max_y:.2}"), Style::default() .add_modifier(Modifier::BOLD) .fg(Color::Gray) .bg(Color::Reset), )); // Chart dataset let colors = color_vec(); let datasets: Vec<Dataset> = (0..latency_throughput.len()) .map(|i| { let color_idx = i % colors.len(); Dataset::default() .name(batch_sizes[i].to_string()) .marker(symbols::Marker::Block) .style(Style::default().fg(colors[color_idx])) .graph_type(GraphType::Scatter) .data(&latency_throughput[i..(i + 1)]) }) .collect(); // Chart Chart::new(datasets) .style(Style::default().fg(Color::Cyan).bg(Color::Reset)) .block( Block::default() .title(Span::styled( format!("{name} throughput over latency"), Style::default().fg(Color::Gray).bg(Color::Reset), )) .borders(Borders::ALL), ) .x_axis( Axis::default() 
.title("ms") .style(Style::default().fg(Color::Gray).bg(Color::Reset)) .labels(x_labels) .bounds([min_x, max_x]), ) .y_axis( Axis::default() .title("tokens/secs") .style(Style::default().fg(Color::Gray).bg(Color::Reset)) .labels(y_labels) .bounds([min_y, max_y]), ) } // Colors for latency/throughput chart fn color_vec() -> Vec<Color> { vec![ Color::Red, Color::Green, Color::Yellow, Color::Blue, Color::Magenta, Color::Cyan, Color::Gray, Color::DarkGray, Color::LightRed, Color::LightGreen, Color::LightYellow, Color::LightBlue, Color::LightMagenta, Color::LightCyan, ] }
text-generation-inference/benchmark/src/app.rs/0
{ "file_path": "text-generation-inference/benchmark/src/app.rs", "repo_id": "text-generation-inference", "token_count": 12188 }
import pytest from text_generation.types import Parameters, Request from text_generation.errors import ValidationError def test_parameters_validation(): # Test best_of Parameters(best_of=1) with pytest.raises(ValidationError): Parameters(best_of=0) with pytest.raises(ValidationError): Parameters(best_of=-1) Parameters(best_of=2, do_sample=True) with pytest.raises(ValidationError): Parameters(best_of=2) with pytest.raises(ValidationError): Parameters(best_of=2, seed=1) # Test repetition_penalty Parameters(repetition_penalty=1) with pytest.raises(ValidationError): Parameters(repetition_penalty=0) with pytest.raises(ValidationError): Parameters(repetition_penalty=-1) # Test seed Parameters(seed=1) with pytest.raises(ValidationError): Parameters(seed=-1) # Test temperature Parameters(temperature=1) with pytest.raises(ValidationError): Parameters(temperature=0) with pytest.raises(ValidationError): Parameters(temperature=-1) # Test top_k Parameters(top_k=1) with pytest.raises(ValidationError): Parameters(top_k=0) with pytest.raises(ValidationError): Parameters(top_k=-1) # Test top_p Parameters(top_p=0.5) with pytest.raises(ValidationError): Parameters(top_p=0) with pytest.raises(ValidationError): Parameters(top_p=-1) with pytest.raises(ValidationError): Parameters(top_p=1) # Test truncate Parameters(truncate=1) with pytest.raises(ValidationError): Parameters(truncate=0) with pytest.raises(ValidationError): Parameters(truncate=-1) # Test typical_p Parameters(typical_p=0.5) with pytest.raises(ValidationError): Parameters(typical_p=0) with pytest.raises(ValidationError): Parameters(typical_p=-1) with pytest.raises(ValidationError): Parameters(typical_p=1) def test_request_validation(): Request(inputs="test") with pytest.raises(ValidationError): Request(inputs="") Request(inputs="test", stream=True) Request(inputs="test", parameters=Parameters(best_of=2, do_sample=True)) with pytest.raises(ValidationError): Request( inputs="test", parameters=Parameters(best_of=2, do_sample=True), stream=True )
text-generation-inference/clients/python/tests/test_types.py/0
{ "file_path": "text-generation-inference/clients/python/tests/test_types.py", "repo_id": "text-generation-inference", "token_count": 984 }
# Streaming

## What is Streaming?

Token streaming is the mode in which the server returns the tokens one by one as the model generates them. This enables showing progressive generations to the user rather than waiting for the whole generation. Streaming is an essential aspect of the end-user experience as it reduces latency, one of the most critical aspects of a smooth experience.

<div class="flex justify-center">
    <img
        class="block dark:hidden"
        src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/streaming-generation-visual_360.gif"
    />
    <img
        class="hidden dark:block"
        src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/streaming-generation-visual-dark_360.gif"
    />
</div>

With token streaming, the server can start returning the tokens one by one before having to generate the whole response. Users can get a sense of the generation's quality before it has finished. This has several positive effects:

* Users can get results orders of magnitude earlier for extremely long queries.
* Seeing something in progress allows users to stop the generation if it's not going in the direction they expect.
* Perceived latency is lower when results are shown in the early stages.
* When used in conversational UIs, the experience feels more natural.

For example, a system can generate 100 tokens per second. If the system generates 1000 tokens, with the non-streaming setup, users need to wait 10 seconds to get results. On the other hand, with the streaming setup, users get initial results immediately, and although end-to-end latency will be the same, they can see half of the generation after five seconds.

Below is an interactive demo that shows non-streaming and streaming side-by-side. Click **generate** to compare them.

<div class="block dark:hidden">
    <iframe
        src="https://osanseviero-streaming-vs-non-streaming.hf.space?__theme=light"
        width="850"
        height="350"
    ></iframe>
</div>

<div class="hidden dark:block">
    <iframe
        src="https://osanseviero-streaming-vs-non-streaming.hf.space?__theme=dark"
        width="850"
        height="350"
    ></iframe>
</div>

## How to use Streaming?

### Streaming with Python

To stream tokens with `InferenceClient`, simply pass `stream=True` and iterate over the response.

```python
from huggingface_hub import InferenceClient

client = InferenceClient(base_url="http://127.0.0.1:8080")
output = client.chat.completions.create(
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Count to 10"},
    ],
    stream=True,
    max_tokens=1024,
)

for chunk in output:
    print(chunk.choices[0].delta.content)

# 1
# 2
# 3
# 4
# 5
# 6
# 7
# 8
# 9
# 10
```

The `huggingface_hub` library also comes with an `AsyncInferenceClient` in case you need to handle the requests concurrently.

```python
import asyncio

from huggingface_hub import AsyncInferenceClient

client = AsyncInferenceClient(base_url="http://127.0.0.1:8080")


async def main():
    stream = await client.chat.completions.create(
        messages=[{"role": "user", "content": "Say this is a test"}],
        stream=True,
    )
    async for chunk in stream:
        print(chunk.choices[0].delta.content or "", end="")


asyncio.run(main())

# This
# is
# a
# test
#.
```

### Streaming with cURL

To use the OpenAI Chat Completions compatible Messages API `v1/chat/completions` endpoint with curl, you can add the `-N` flag, which disables curl's default buffering and shows the data as it arrives from the server.

```curl
curl -N localhost:8080/v1/chat/completions \
    -X POST \
    -d '{
  "model": "tgi",
  "messages": [
    {
      "role": "system",
      "content": "You are a helpful assistant."
    },
    {
      "role": "user",
      "content": "What is deep learning?"
    }
  ],
  "stream": true,
  "max_tokens": 20
}' \
    -H 'Content-Type: application/json'
```

### Streaming with JavaScript

First, we need to install the `@huggingface/inference` library.

`npm install @huggingface/inference`

If you're using the free Inference API, you can use `HfInference`. If you're using inference endpoints, you can use `HfInferenceEndpoint`.

We can create an `HfInferenceEndpoint` by providing our endpoint URL and credentials.

```js
import { HfInferenceEndpoint } from '@huggingface/inference'

const hf = new HfInferenceEndpoint('https://YOUR_ENDPOINT.endpoints.huggingface.cloud', 'hf_YOUR_TOKEN')

// prompt
const prompt = 'What can you do in Nuremberg, Germany? Give me 3 Tips'

const stream = hf.textGenerationStream({ inputs: prompt })
for await (const r of stream) {
  // yield the generated token
  process.stdout.write(r.token.text)
}
```

## How does Streaming work under the hood?

Under the hood, TGI uses Server-Sent Events (SSE). In an SSE setup, a client sends a request with the data, opening an HTTP connection and subscribing to updates. Afterward, the server sends data to the client. There is no need for further requests; the server will keep sending the data. SSEs are unidirectional, meaning the client does not send other requests to the server. SSE sends data over HTTP, making it easy to use.

SSEs are different from:

* Polling: where the client keeps calling the server to get data. This means that the server might return empty responses and cause overhead.
* Webhooks: where there is a bi-directional connection. The server can send information to the client, but the client can also send data to the server after the first request. Webhooks are more complex to operate as they don’t only use HTTP.

If there are too many requests at the same time, TGI returns an HTTP error with an `overloaded` error type (`huggingface_hub` returns `OverloadedError`). This allows the client to manage the overloaded server (e.g., it could display a busy error to the user or retry with a new request). To configure the maximum number of concurrent requests, you can specify `--max-concurrent-requests`, allowing clients to handle backpressure.
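
To illustrate that last point, here is a minimal, hypothetical sketch of a client-side retry loop for an overloaded server. It is not an official client pattern: it assumes a TGI instance at `http://127.0.0.1:8080` and that `OverloadedError` can be imported from `huggingface_hub.errors` (the exact import path may differ between `huggingface_hub` versions).

```python
import time

from huggingface_hub import InferenceClient
# Assumption: recent huggingface_hub versions expose OverloadedError here.
from huggingface_hub.errors import OverloadedError

client = InferenceClient(base_url="http://127.0.0.1:8080")


def stream_with_retry(messages, max_retries=3, backoff_s=1.0):
    """Stream a chat completion, backing off when TGI reports it is overloaded."""
    for attempt in range(max_retries):
        try:
            output = client.chat.completions.create(messages=messages, stream=True)
            for chunk in output:
                print(chunk.choices[0].delta.content or "", end="")
            return
        except OverloadedError:
            # The server hit its concurrency limit; wait a bit before retrying.
            time.sleep(backoff_s * (attempt + 1))
    raise RuntimeError("Server is still overloaded after several retries")


stream_with_retry([{"role": "user", "content": "What is Deep Learning?"}])
```

A real application would typically also cap the total wait time and surface a busy message to the user instead of blocking indefinitely.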
text-generation-inference/docs/source/conceptual/streaming.md/0
{ "file_path": "text-generation-inference/docs/source/conceptual/streaming.md", "repo_id": "text-generation-inference", "token_count": 1890 }
# Collection of Usage Statistics

Text Generation Inference collects anonymous usage statistics to help us improve the service. The collected data is used to improve TGI and to understand what causes failures. The data is collected transparently and any sensitive information is omitted.

Usage statistics are collected only when TGI is running in a Docker container. This prevents data collection when TGI is run directly on the host machine. The collected data includes startup and shutdown events, as well as a heartbeat signal sent every 15 minutes.

## What data is collected

The code that collects the data is available [here](https://github.com/huggingface/text-generation-inference/blob/main/router/src/usage_stats.rs). As of release 2.1.2, this is an example of the data collected:

- From the TGI configuration:

```json
{
  "event_type": "start",
  "disable_grammar_support": false,
  "max_batch_prefill_tokens": 4096,
  "max_batch_size": null,
  "max_batch_total_tokens": null,
  "max_best_of": 2,
  "max_client_batch_size": 4,
  "max_concurrent_requests": 128,
  "max_input_tokens": 1024,
  "max_stop_sequences": 4,
  "max_top_n_tokens": 5,
  "max_total_tokens": 2048,
  "max_waiting_tokens": 20,
  "model_config": {
    "model_type": "Bloom"
  },
  "revision": null,
  "tokenizer_class": "BloomTokenizerFast",
  "validation_workers": 2,
  "waiting_served_ratio": 1.2,
  "docker_label": "latest",
  "git_sha": "cfc118704880453d29bcbe4fbbd91dda501cf5fe",
  "nvidia_env": {
    "name": "NVIDIA A10G",
    "pci_bus_id": "00000000:00:1E.0",
    "driver_version": "535.183.01",
    "pstate": "P8",
    "pcie_link_gen_max": "4",
    "pcie_link_gen_current": "1",
    "temperature_gpu": "31",
    "utilization_gpu": "0 %",
    "utilization_memory": "0 %",
    "memory_total": "23028 MiB",
    "memory_free": "22515 MiB",
    "memory_used": "0 MiB",
    "reset_status_reset_required": "No",
    "reset_status_drain_and_reset_recommended": "No",
    "compute_cap": "8.6",
    "ecc_errors_corrected_volatile_total": "0",
    "mig_mode_current": "[N/A]",
    "power_draw_instant": "10.86 W",
    "power_limit": "300.00 W"
  },
  "system_env": {
    "cpu_count": 16,
    "cpu_type": "AMD EPYC 7R32",
    "total_memory": 66681196544,
    "architecture": "x86_64",
    "platform": "linux-unix-x86_64"
  }
}
```

## How to opt-out

You can control how much usage data is collected by passing the `--usage-stats` argument to `text-generation-launcher`:

- `--usage-stats=no-stack` will not send stack traces or error types, but will continue to send start and stop events.
- `--usage-stats=off` will disable usage statistics collection entirely.
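
To make the event format above concrete, here is a small illustrative Python sketch that parses an event payload with this shape and prints a short summary. The inline payload is abbreviated and hypothetical; only field names documented on this page are used.

```python
import json

# Abbreviated, hypothetical payload using the documented field names.
raw_event = """
{
  "event_type": "start",
  "model_config": {"model_type": "Bloom"},
  "nvidia_env": {"name": "NVIDIA A10G", "memory_total": "23028 MiB"},
  "system_env": {"cpu_count": 16, "platform": "linux-unix-x86_64"}
}
"""

event = json.loads(raw_event)
print(f"event: {event['event_type']}")
print(f"model type: {event['model_config']['model_type']}")
print(f"gpu: {event['nvidia_env']['name']} ({event['nvidia_env']['memory_total']})")
print(f"system: {event['system_env']['platform']}, {event['system_env']['cpu_count']} CPUs")
```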
text-generation-inference/docs/source/usage_statistics.md/0
{ "file_path": "text-generation-inference/docs/source/usage_statistics.md", "repo_id": "text-generation-inference", "token_count": 966 }
[ { "choices": [ { "delta": { "content": "**", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1726656043, "id": "", "model": "meta-llama/Meta-Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "2.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "Deep", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1726656043, "id": "", "model": "meta-llama/Meta-Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "2.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " Learning", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1726656043, "id": "", "model": "meta-llama/Meta-Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "2.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": ":", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1726656043, "id": "", "model": "meta-llama/Meta-Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "2.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " An", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1726656043, "id": "", "model": "meta-llama/Meta-Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "2.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " Overview", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1726656043, "id": "", "model": "meta-llama/Meta-Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "2.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "**\n", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1726656044, "id": "", "model": "meta-llama/Meta-Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "2.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "================================", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1726656044, "id": "", "model": "meta-llama/Meta-Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "2.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "=====", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1726656044, "id": "", "model": "meta-llama/Meta-Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "2.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "\n\n", "role": "assistant", "tool_calls": null }, "finish_reason": "length", "index": 0, "logprobs": null } ], "created": 1726656044, "id": "", "model": "meta-llama/Meta-Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "2.2.1-dev0-native", "usage": { "completion_tokens": 10, "prompt_tokens": 40, "total_tokens": 50 } } ]
text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_stream_usage.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_stream_usage.json", "repo_id": "text-generation-inference", "token_count": 2511 }
{ "choices": [ { "finish_reason": "length", "index": 0, "logprobs": null, "message": { "content": "Both an elephant and a mouse are mammals. However, the differences between elephants and mice are:\n\n1", "role": "assistant" } } ], "created": 1732541189, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "chat.completion", "system_fingerprint": "2.4.1-dev0-native", "usage": { "completion_tokens": 30, "prompt_tokens": 49, "total_tokens": 79 } }
text-generation-inference/integration-tests/models/__snapshots__/test_continue_final_message/test_llama_completion_single_prompt.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_continue_final_message/test_llama_completion_single_prompt.json", "repo_id": "text-generation-inference", "token_count": 258 }
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 688, "logprob": -0.546875, "special": false, "text": "**" }, { "id": 103889, "logprob": -0.49023438, "special": false, "text": "Hydrogen" }, { "id": 190213, "logprob": -0.48632812, "special": false, "text": "**," }, { "id": 2611, "logprob": -0.58203125, "special": false, "text": " light" }, { "id": 578, "logprob": -0.099121094, "special": false, "text": " and" }, { "id": 2223, "logprob": -1.078125, "special": false, "text": " free" }, { "id": 235269, "logprob": -0.025756836, "special": false, "text": "," }, { "id": 108, "logprob": -0.29101562, "special": false, "text": "\n" }, { "id": 688, "logprob": -0.0035858154, "special": false, "text": "**" }, { "id": 1949, "logprob": -4.1007996e-05, "special": false, "text": "He" } ], "top_tokens": null }, "generated_text": "**Hydrogen**, light and free,\n**He" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma2/test_flash_gemma2.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma2/test_flash_gemma2.json", "repo_id": "text-generation-inference", "token_count": 877 }
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": 0, "tokens": [ { "id": 13, "logprob": -1.9980469, "special": false, "text": "." }, { "id": 578, "logprob": -0.15795898, "special": false, "text": " The" }, { "id": 3622, "logprob": -1.0458984, "special": false, "text": " server" }, { "id": 31680, "logprob": -1.3623047, "special": false, "text": " responds" }, { "id": 449, "logprob": 0.0, "special": false, "text": " with" }, { "id": 264, "logprob": 0.0, "special": false, "text": " a" }, { "id": 330, "logprob": -0.5678711, "special": false, "text": " \"" }, { "id": 1049, "logprob": -0.12322998, "special": false, "text": "200" }, { "id": 10619, "logprob": 0.0, "special": false, "text": " OK" }, { "id": 1, "logprob": 0.0, "special": false, "text": "\"" } ], "top_tokens": null }, "generated_text": "Test request. The server responds with a \"200 OK\"" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_exl2/test_flash_llama_exl2_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_exl2/test_flash_llama_exl2_all_params.json", "repo_id": "text-generation-inference", "token_count": 856 }
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": 0, "tokens": [ { "id": 311, "logprob": -1.4277344, "special": false, "text": " to" }, { "id": 279, "logprob": -0.65478516, "special": false, "text": " the" }, { "id": 2473, "logprob": -1.8300781, "special": false, "text": " service" }, { "id": 382, "logprob": -0.75, "special": false, "text": ".\n\n" }, { "id": 286, "logprob": -0.11621094, "special": false, "text": " " }, { "id": 549, "logprob": 0.0, "special": false, "text": " :" }, { "id": 689, "logprob": -0.48608398, "special": false, "text": "return" }, { "id": 25, "logprob": 0.0, "special": false, "text": ":" }, { "id": 5949, "logprob": -0.5756836, "special": false, "text": " Response" }, { "id": 504, "logprob": -0.24499512, "special": false, "text": " from" } ], "top_tokens": null }, "generated_text": "Test request to the service.\n\n :return: Response from" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_qwen2/test_flash_qwen2_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_qwen2/test_flash_qwen2_all_params.json", "repo_id": "text-generation-inference", "token_count": 876 }
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 222, "logprob": -1.9091797, "special": false, "text": "\n" }, { "id": 222, "logprob": -1.0478516, "special": false, "text": "\n" }, { "id": 40, "logprob": -3.015625, "special": false, "text": "#" }, { "id": 494, "logprob": -1.4228516, "special": false, "text": " +" }, { "id": 447, "logprob": -1.1025391, "special": false, "text": " [" }, { "id": 9009, "logprob": -0.0008444786, "special": false, "text": "markdown" }, { "id": 98, "logprob": -8.8095665e-05, "special": false, "text": "]" }, { "id": 37402, "logprob": -0.5810547, "special": false, "text": " slideshow" }, { "id": 8492, "logprob": -0.00022864342, "special": false, "text": "={\"" }, { "id": 7277, "logprob": -0.00030994415, "special": false, "text": "slide" } ], "top_tokens": null }, "generated_text": "\n\n# + [markdown] slideshow={\"slide" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 222, "logprob": -1.9091797, "special": false, "text": "\n" }, { "id": 222, "logprob": -1.0478516, "special": false, "text": "\n" }, { "id": 40, "logprob": -3.015625, "special": false, "text": "#" }, { "id": 494, "logprob": -1.4228516, "special": false, "text": " +" }, { "id": 447, "logprob": -1.1025391, "special": false, "text": " [" }, { "id": 9009, "logprob": -0.0008444786, "special": false, "text": "markdown" }, { "id": 98, "logprob": -8.8095665e-05, "special": false, "text": "]" }, { "id": 37402, "logprob": -0.5810547, "special": false, "text": " slideshow" }, { "id": 8492, "logprob": -0.00022864342, "special": false, "text": "={\"" }, { "id": 7277, "logprob": -0.00030994415, "special": false, "text": "slide" } ], "top_tokens": null }, "generated_text": "\n\n# + [markdown] slideshow={\"slide" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 222, "logprob": -1.9091797, "special": false, "text": "\n" }, { "id": 222, "logprob": -1.0478516, "special": false, "text": "\n" }, { "id": 40, "logprob": -3.015625, "special": false, "text": "#" }, { "id": 494, "logprob": -1.4228516, "special": false, "text": " +" }, { "id": 447, "logprob": -1.1025391, "special": false, "text": " [" }, { "id": 9009, "logprob": -0.0008444786, "special": false, "text": "markdown" }, { "id": 98, "logprob": -8.8095665e-05, "special": false, "text": "]" }, { "id": 37402, "logprob": -0.5810547, "special": false, "text": " slideshow" }, { "id": 8492, "logprob": -0.00022864342, "special": false, "text": "={\"" }, { "id": 7277, "logprob": -0.00030994415, "special": false, "text": "slide" } ], "top_tokens": null }, "generated_text": "\n\n# + [markdown] slideshow={\"slide" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 222, "logprob": -1.9091797, "special": false, "text": "\n" }, { "id": 222, "logprob": -1.0478516, "special": false, "text": "\n" }, { "id": 40, "logprob": -3.015625, "special": false, "text": "#" }, { "id": 494, "logprob": -1.4228516, "special": false, "text": " +" }, { "id": 447, "logprob": -1.1025391, "special": false, "text": " [" }, { "id": 9009, "logprob": -0.0008444786, "special": false, "text": "markdown" }, { "id": 98, "logprob": -8.8095665e-05, "special": false, "text": "]" }, { "id": 37402, "logprob": -0.5810547, "special": false, 
"text": " slideshow" }, { "id": 8492, "logprob": -0.00022864342, "special": false, "text": "={\"" }, { "id": 7277, "logprob": -0.00030994415, "special": false, "text": "slide" } ], "top_tokens": null }, "generated_text": "\n\n# + [markdown] slideshow={\"slide" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder2_lora/test_flash_starcoder2_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder2_lora/test_flash_starcoder2_load.json", "repo_id": "text-generation-inference", "token_count": 4084 }
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 13, "logprob": -0.007621765, "special": false, "text": "\n" }, { "id": 13, "logprob": -0.20812988, "special": false, "text": "\n" }, { "id": 16114, "logprob": -1.2587891, "special": false, "text": "Once" }, { "id": 3714, "logprob": -0.20825195, "special": false, "text": " upon" }, { "id": 264, "logprob": -0.0017709732, "special": false, "text": " a" }, { "id": 727, "logprob": -0.011932373, "special": false, "text": " time" }, { "id": 28725, "logprob": -0.17297363, "special": false, "text": "," }, { "id": 736, "logprob": -0.9057617, "special": false, "text": " there" }, { "id": 403, "logprob": -0.05758667, "special": false, "text": " was" }, { "id": 264, "logprob": -0.00970459, "special": false, "text": " a" } ], "top_tokens": null }, "generated_text": "\n\nOnce upon a time, there was a" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 13, "logprob": -0.007621765, "special": false, "text": "\n" }, { "id": 13, "logprob": -0.20275879, "special": false, "text": "\n" }, { "id": 16114, "logprob": -1.2578125, "special": false, "text": "Once" }, { "id": 3714, "logprob": -0.2084961, "special": false, "text": " upon" }, { "id": 264, "logprob": -0.0017738342, "special": false, "text": " a" }, { "id": 727, "logprob": -0.011932373, "special": false, "text": " time" }, { "id": 28725, "logprob": -0.17297363, "special": false, "text": "," }, { "id": 736, "logprob": -0.9057617, "special": false, "text": " there" }, { "id": 403, "logprob": -0.05758667, "special": false, "text": " was" }, { "id": 264, "logprob": -0.00970459, "special": false, "text": " a" } ], "top_tokens": null }, "generated_text": "\n\nOnce upon a time, there was a" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 13, "logprob": -0.007621765, "special": false, "text": "\n" }, { "id": 13, "logprob": -0.20275879, "special": false, "text": "\n" }, { "id": 16114, "logprob": -1.2578125, "special": false, "text": "Once" }, { "id": 3714, "logprob": -0.2084961, "special": false, "text": " upon" }, { "id": 264, "logprob": -0.0017738342, "special": false, "text": " a" }, { "id": 727, "logprob": -0.011932373, "special": false, "text": " time" }, { "id": 28725, "logprob": -0.17297363, "special": false, "text": "," }, { "id": 736, "logprob": -0.9057617, "special": false, "text": " there" }, { "id": 403, "logprob": -0.05758667, "special": false, "text": " was" }, { "id": 264, "logprob": -0.00970459, "special": false, "text": " a" } ], "top_tokens": null }, "generated_text": "\n\nOnce upon a time, there was a" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 13, "logprob": -0.007621765, "special": false, "text": "\n" }, { "id": 13, "logprob": -0.20812988, "special": false, "text": "\n" }, { "id": 16114, "logprob": -1.2587891, "special": false, "text": "Once" }, { "id": 3714, "logprob": -0.20825195, "special": false, "text": " upon" }, { "id": 264, "logprob": -0.0017709732, "special": false, "text": " a" }, { "id": 727, "logprob": -0.011932373, "special": false, "text": " time" }, { "id": 28725, "logprob": -0.17297363, "special": false, "text": "," }, { "id": 736, "logprob": -0.9057617, "special": false, "text": " there" }, { "id": 
403, "logprob": -0.05758667, "special": false, "text": " was" }, { "id": 264, "logprob": -0.00970459, "special": false, "text": " a" } ], "top_tokens": null }, "generated_text": "\n\nOnce upon a time, there was a" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_llava_next/test_flash_llava_next_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_llava_next/test_flash_llava_next_load.json", "repo_id": "text-generation-inference", "token_count": 4048 }
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 42, "logprob": -0.86279297, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.94921875, "special": false, "text": "'m" }, { "id": 7016, "logprob": -2.1835938, "special": false, "text": " sorry" }, { "id": 13, "logprob": -0.074035645, "special": false, "text": "," }, { "id": 1394, "logprob": -0.86376953, "special": false, "text": "You" }, { "id": 452, "logprob": -1.2070312, "special": false, "text": " have" }, { "id": 247, "logprob": -1.4365234, "special": false, "text": " a" }, { "id": 4327, "logprob": -1.109375, "special": false, "text": " choice" }, { "id": 273, "logprob": -0.93408203, "special": false, "text": " of" }, { "id": 752, "logprob": -1.8808594, "special": false, "text": " what" } ] }, "generated_text": "I'm sorry,You have a choice of what" }
text-generation-inference/integration-tests/models/__snapshots__/test_neox/test_neox.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_neox/test_neox.json", "repo_id": "text-generation-inference", "token_count": 853 }
import pytest @pytest.fixture(scope="module") def flash_deepseek_v2_handle(launcher): with launcher("deepseek-ai/DeepSeek-V2-Lite", num_shard=2) as handle: yield handle @pytest.fixture(scope="module") async def flash_deepseek_v2(flash_deepseek_v2_handle): await flash_deepseek_v2_handle.health(300) return flash_deepseek_v2_handle.client @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_flash_deepseek_v2(flash_deepseek_v2, response_snapshot): response = await flash_deepseek_v2.generate( "Test request", max_new_tokens=10, decoder_input_details=True ) assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_flash_deepseek_v2_all_params(flash_deepseek_v2, response_snapshot): response = await flash_deepseek_v2.generate( "Test request", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, stop_sequences=["test"], temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_flash_deepseek_v2_load( flash_deepseek_v2, generate_load, response_snapshot ): responses = await generate_load( flash_deepseek_v2, "Test request", max_new_tokens=10, n=4 ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_deepseek_v2.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_deepseek_v2.py", "repo_id": "text-generation-inference", "token_count": 710 }
import pytest @pytest.fixture(scope="module") def flash_medusa_handle(launcher): with launcher( "FasterDecoding/medusa-vicuna-7b-v1.3", num_shard=2, revision="refs/pr/1" ) as handle: yield handle @pytest.fixture(scope="module") async def flash_medusa(flash_medusa_handle): await flash_medusa_handle.health(300) return flash_medusa_handle.client @pytest.mark.asyncio async def test_flash_medusa_simple(flash_medusa, response_snapshot): response = await flash_medusa.generate( "What is Deep Learning?", max_new_tokens=10, decoder_input_details=True ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio async def test_flash_medusa_all_params(flash_medusa, response_snapshot): response = await flash_medusa.generate( "What is Deep Learning?", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, stop_sequences=["test"], temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio async def test_flash_medusa_load(flash_medusa, generate_load, response_snapshot): responses = await generate_load( flash_medusa, "What is Deep Learning?", max_new_tokens=10, n=4 ) assert len(responses) == 4 assert all( [r.generated_text == responses[0].generated_text for r in responses] ), f"{[r.generated_text for r in responses]}" assert ( responses[0].generated_text == "\nDeep learning is a subset of machine learning" ) assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_medusa.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_medusa.py", "repo_id": "text-generation-inference", "token_count": 749 }
import pytest import requests @pytest.fixture(scope="module") def flash_starcoder2_handle(launcher): with launcher( "bigcode/starcoder2-3b", lora_adapters=["smangrul/starcoder-3b-hugcoder"] ) as handle: yield handle @pytest.fixture(scope="module") async def flash_starcoder2(flash_starcoder2_handle): await flash_starcoder2_handle.health(300) return flash_starcoder2_handle.client @pytest.mark.asyncio async def test_flash_starcoder2(flash_starcoder2, response_snapshot): response = await flash_starcoder2.generate( "def print_hello", max_new_tokens=10, decoder_input_details=True ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio async def test_flash_starcoder2_default_params(flash_starcoder2, response_snapshot): response = await flash_starcoder2.generate( "who are you?", max_new_tokens=60, temperature=0.2, top_p=0.95, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 60 assert response == response_snapshot @pytest.mark.asyncio async def test_flash_starcoder2_load( flash_starcoder2, generate_load, response_snapshot ): responses = await generate_load( flash_starcoder2, "who are you?", max_new_tokens=10, n=4 ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot @pytest.mark.asyncio async def test_flash_starcoder2_with_hugcode_adapter( flash_starcoder2, response_snapshot ): response = requests.post( f"{flash_starcoder2.base_url}/generate", headers=flash_starcoder2.headers, json={ "inputs": "def print_hello", "parameters": { "max_new_tokens": 10, "adapter_id": "smangrul/starcoder-3b-hugcoder", "details": True, }, }, ) assert response.status_code == 200 data = response.json() assert data["generated_text"] == '_world():\n print("Hello World!")\n' assert data == response_snapshot
text-generation-inference/integration-tests/models/test_flash_starcoder2_lora.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_starcoder2_lora.py", "repo_id": "text-generation-inference", "token_count": 940 }
import pytest @pytest.fixture(scope="module") def flash_smolvlm_next_handle(launcher): with launcher("HuggingFaceTB/SmolVLM-Instruct") as handle: yield handle @pytest.fixture(scope="module") async def flash_smolvlm_next(flash_smolvlm_next_handle): await flash_smolvlm_next_handle.health(300) return flash_smolvlm_next_handle.client @pytest.mark.asyncio @pytest.mark.private async def test_flash_smolvlm_next_simple_url(flash_smolvlm_next, response_snapshot): ny_skyline = "https://huggingface.co/spaces/merve/chameleon-7b/resolve/main/bee.jpg" query = "What is in this image?" response = await flash_smolvlm_next.generate( f"<|begin_of_text|><|begin_of_text|>User:![]({ny_skyline}){query}<end_of_utterance>\nAssistant:", max_new_tokens=10, seed=1337, ) print(response) assert ( response.generated_text == " A bee on a pink flower." ), f"{repr(response.generated_text)}" assert response.details.generated_tokens == 8 assert response == response_snapshot
text-generation-inference/integration-tests/models/test_smolvlm.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_smolvlm.py", "repo_id": "text-generation-inference", "token_count": 435 }
use std::error::Error; use vergen::EmitBuilder; fn main() -> Result<(), Box<dyn Error>> { // Try to get the git sha from the local git repository if EmitBuilder::builder() .fail_on_error() .git_sha(false) .emit() .is_err() { // Unable to get the git sha if let Ok(sha) = std::env::var("GIT_SHA") { // Set it from an env var println!("cargo:rustc-env=VERGEN_GIT_SHA={sha}"); } } // Set docker label if present if let Ok(label) = std::env::var("DOCKER_LABEL") { // Set it from an env var println!("cargo:rustc-env=DOCKER_LABEL={label}"); } Ok(()) }
text-generation-inference/router/build.rs/0
{ "file_path": "text-generation-inference/router/build.rs", "repo_id": "text-generation-inference", "token_count": 324 }
include Makefile-flash-att include Makefile-flash-att-v2 include Makefile-vllm include Makefile-awq include Makefile-eetq include Makefile-selective-scan include Makefile-lorax-punica include Makefile-exllamav2 include Makefile-flashinfer unit-tests: pip install -U pip uv uv pip install -e ".[dev]" pytest -s -vv -m "not private" tests gen-server: # Compile protos pip install -U pip uv uv pip install -r requirements_gen.txt mkdir text_generation_server/pb || true python -m grpc_tools.protoc -I../proto/v3 --python_out=text_generation_server/pb \ --grpc_python_out=text_generation_server/pb --mypy_out=text_generation_server/pb ../proto/v3/generate.proto find text_generation_server/pb/ -type f -name "*.py" -print0 -exec sed -i -e 's/^\(import.*pb2\)/from . \1/g' {} \; touch text_generation_server/pb/__init__.py gen-server-raw: mkdir text_generation_server/pb || true python -m grpc_tools.protoc -I../proto/v3 --python_out=text_generation_server/pb \ --grpc_python_out=text_generation_server/pb --mypy_out=text_generation_server/pb ../proto/v3/generate.proto find text_generation_server/pb/ -type f -name "*.py" -print0 -exec sed -i -e 's/^\(import.*pb2\)/from . \1/g' {} \; touch text_generation_server/pb/__init__.py install-server: gen-server uv pip install -e ".[accelerate, compressed-tensors, quantize, peft, outlines]" install: install-cuda echo "Installed server" install-cuda: install-server install-flash-attention-v2-cuda install-flash-attention uv pip install -e ".[attention,bnb,marlin,moe]" uv pip install nvidia-nccl-cu12==2.22.3 install-rocm: install-server install-flash-attention-v2-rocm install-vllm-rocm export-requirements: uv pip compile pyproject.toml --extra gen -o requirements_gen.txt --python-version 3.11 uv pip compile pyproject.toml --extra attention --extra bnb --extra accelerate --extra compressed-tensors --extra marlin --extra moe --extra quantize --extra peft --extra outlines -o requirements_cuda.txt --python-version 3.11 uv pip compile pyproject.toml --extra accelerate --extra compressed-tensors --extra quantize --extra peft --extra outlines -o requirements_intel.txt --python-version 3.11 uv pip compile pyproject.toml --extra accelerate --extra compressed-tensors --extra quantize --extra peft --extra outlines -o requirements_rocm.txt --python-version 3.11
text-generation-inference/server/Makefile/0
{ "file_path": "text-generation-inference/server/Makefile", "repo_id": "text-generation-inference", "token_count": 817 }
// Adapted from turboderp exllama: https://github.com/turboderp/exllama #define _cuda_buffers_cu #include "cuda_buffers.cuh" CudaBuffers* g_buffers[CUDA_MAX_DEVICES] = {NULL}; // __constant__ half2 q4_table[16][256]; // half2 q4_table_host[16][256]; // bool q4_table_init = false; CudaBuffers::CudaBuffers ( int _device, half* _temp_state, half* _temp_dq ) : device(_device), temp_state(_temp_state), temp_dq(_temp_dq) { cudaSetDevice(_device); cudaStreamCreate(&alt_stream_1); cudaStreamCreate(&alt_stream_2); cudaStreamCreate(&alt_stream_3); cudaEventCreate(&alt_stream_1_done); cudaEventCreate(&alt_stream_2_done); cudaEventCreate(&alt_stream_3_done); } CudaBuffers::~CudaBuffers() { cudaStreamDestroy(alt_stream_1); cudaStreamDestroy(alt_stream_2); cudaStreamDestroy(alt_stream_3); cudaEventDestroy(alt_stream_1_done); cudaEventDestroy(alt_stream_2_done); cudaEventDestroy(alt_stream_3_done); } CudaBuffers* get_buffers(const int device_index) { return g_buffers[device_index]; } void prepare_buffers_cuda ( int _device, half* _temp_state, half* _temp_dq ) { CudaBuffers* buffers = new CudaBuffers ( _device, _temp_state, _temp_dq ); g_buffers[_device] = buffers; } void cleanup_buffers_cuda() { for (int i = 0; i < CUDA_MAX_DEVICES; i++) { if (!g_buffers[i]) continue; delete g_buffers[i]; g_buffers[i] = NULL; } }
text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_buffers.cu/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_buffers.cu", "repo_id": "text-generation-inference", "token_count": 680 }
#include <torch/extension.h> #include <c10/cuda/CUDAGuard.h> #include <ATen/cuda/CUDAContext.h> #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cstdint> #include <cstdio> #include "config.h" #include "cuda/q_matrix.cuh" #include "cuda/q_gemm.cuh" #include "cpp/util.h" // Some decluttering macros #define TORCH_CHECK_DTYPE(__x, __dtype) TORCH_CHECK((__x).dtype() == torch::__dtype, #__x " is incorrect datatype, must be " #__dtype) #define TORCH_CHECK_DTYPE_OPT(__x, __dtype) TORCH_CHECK((__x).device().is_meta() || (__x).dtype() == torch::__dtype, #__x " is incorrect datatype, must be " #__dtype) #define TORCH_CHECK_SHAPES(__x, __dim_x, __y, __dim_y, __scale_y) TORCH_CHECK((__x).size(__dim_x) == (__y).size(__dim_y) * __scale_y, #__x " and " #__y " have incompatible shapes") #define TORCH_CHECK_SHAPES_OPT(__x, __dim_x, __y, __dim_y, __scale_y) TORCH_CHECK((__x).device().is_meta() || (__x).size(__dim_x) == (__y).size(__dim_y) * __scale_y, #__x " and " #__y " have incompatible shapes") // Quant matrix uintptr_t make_q_matrix ( torch::Tensor q_weight, torch::Tensor q_perm, torch::Tensor q_invperm, torch::Tensor q_scale, torch::Tensor q_scale_max, torch::Tensor q_groups, torch::Tensor q_group_map, torch::Tensor gptq_qzeros, torch::Tensor gptq_scales, torch::Tensor gptq_g_idx, torch::Tensor temp_dq ) { TORCH_CHECK_DTYPE(q_weight, kInt); TORCH_CHECK_DTYPE_OPT(q_perm, kShort); TORCH_CHECK_DTYPE_OPT(q_invperm, kShort); TORCH_CHECK_DTYPE_OPT(q_scale, kInt); TORCH_CHECK_DTYPE_OPT(q_scale_max, kHalf); TORCH_CHECK_DTYPE_OPT(q_groups, kShort); TORCH_CHECK_DTYPE_OPT(q_group_map, kShort); TORCH_CHECK_DTYPE_OPT(gptq_qzeros, kInt); TORCH_CHECK_DTYPE_OPT(gptq_scales, kHalf); TORCH_CHECK_DTYPE_OPT(gptq_g_idx, kInt); TORCH_CHECK_SHAPES(q_perm, 0, q_invperm, 0, 1); int device = q_weight.device().index(); int width = q_weight.size(1); int groups; int height; if (!q_scale.device().is_meta()) { TORCH_CHECK_SHAPES(q_weight, 1, q_scale, 1, 8); TORCH_CHECK_SHAPES(q_scale_max, 0, q_scale, 0, 1); groups = q_scale.size(0); height = q_invperm.size(0); } else { TORCH_CHECK_SHAPES(q_weight, 1, gptq_qzeros, 1, 8); TORCH_CHECK_SHAPES(q_weight, 1, gptq_scales, 1, 1); groups = gptq_qzeros.size(0); height = q_weight.size(0) * 8; } TORCH_CHECK(temp_dq.size(0) >= width * height, "Insufficient size of temp_dq buffer") QMatrix* m = new QMatrix ( device, height, width, groups, (uint32_t*) q_weight.data_ptr(), q_perm.device().is_meta() ? NULL : (uint16_t*) q_perm.data_ptr(), q_invperm.device().is_meta() ? NULL : (uint16_t*) q_invperm.data_ptr(), q_scale.device().is_meta() ? NULL : (uint32_t*) q_scale.data_ptr(), q_scale_max.device().is_meta() ? NULL : (half*) q_scale_max.data_ptr(), q_groups.device().is_meta() ? NULL : (uint16_t*) q_groups.data_ptr(), q_group_map.device().is_meta() ? NULL : (uint16_t*) q_group_map.data_ptr(), gptq_qzeros.device().is_meta() ? NULL : (uint32_t*) gptq_qzeros.data_ptr(), gptq_scales.device().is_meta() ? NULL : (half*) gptq_scales.data_ptr(), gptq_g_idx.device().is_meta() ? 
NULL : (uint32_t*) gptq_g_idx.data_ptr(), (half*) temp_dq.data_ptr() ); if (m->failed) throw std::runtime_error("CUDA out of memory"); return reinterpret_cast<uintptr_t> (m); } void gemm_half_q_half ( torch::Tensor a, uintptr_t b, torch::Tensor c, bool force_cuda ) { QMatrix* qm = reinterpret_cast<QMatrix*> (b); TORCH_CHECK_DTYPE(a, kHalf); TORCH_CHECK_DTYPE(c, kHalf); TORCH_CHECK_SHAPES(a, 0, c, 0, 1); TORCH_CHECK(qm->height == a.size(1), "a and b have incompatible shapes") TORCH_CHECK(qm->width == c.size(1), "b and c have incompatible shapes") const at::cuda::OptionalCUDAGuard device_guard(device_of(a)); gemm_half_q_half_cuda ( at::cuda::getCurrentCUDABlasHandle(), (const half*) a.data_ptr(), qm, (half*) c.data_ptr(), c.size(0), // m c.size(1), // n a.size(1), // k true, NULL, force_cuda ); } // Bindings PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("make_q_matrix", &make_q_matrix, "make_q_matrix"); m.def("gemm_half_q_half", &gemm_half_q_half, "gemm_half_q_half"); }
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/ext.cpp/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/ext.cpp", "repo_id": "text-generation-inference", "token_count": 2184 }
import os import tempfile import pytest import huggingface_hub.constants import text_generation_server.utils.hub from text_generation_server.utils.hub import ( weight_hub_files, download_weights, weight_files, EntryNotFoundError, LocalEntryNotFoundError, RevisionNotFoundError, ) @pytest.fixture() def offline(): current_value = text_generation_server.utils.hub.HF_HUB_OFFLINE text_generation_server.utils.hub.HF_HUB_OFFLINE = True yield "offline" text_generation_server.utils.hub.HF_HUB_OFFLINE = current_value @pytest.fixture() def fresh_cache(): with tempfile.TemporaryDirectory() as d: current_value = huggingface_hub.constants.HUGGINGFACE_HUB_CACHE huggingface_hub.constants.HUGGINGFACE_HUB_CACHE = d text_generation_server.utils.hub.HUGGINGFACE_HUB_CACHE = d os.environ["HUGGINGFACE_HUB_CACHE"] = d yield huggingface_hub.constants.HUGGINGFACE_HUB_CACHE = current_value os.environ["HUGGINGFACE_HUB_CACHE"] = current_value text_generation_server.utils.hub.HUGGINGFACE_HUB_CACHE = current_value @pytest.fixture() def prefetched(): model_id = "bert-base-uncased" huggingface_hub.snapshot_download( repo_id=model_id, revision="main", local_files_only=False, repo_type="model", allow_patterns=["*.safetensors"], ) yield model_id def test_weight_hub_files_offline_error(offline, fresh_cache): # If the model is not prefetched then it will raise an error with pytest.raises(EntryNotFoundError): weight_hub_files("gpt2") def test_weight_hub_files_offline_ok(prefetched, offline): # If the model is prefetched then we should be able to get the weight files from local cache filenames = weight_hub_files(prefetched) root = None assert len(filenames) == 1 for f in filenames: curroot, filename = os.path.split(f) if root is None: root = curroot else: assert root == curroot assert filename == "model.safetensors" def test_weight_hub_files(): filenames = weight_hub_files("bigscience/bloom-560m") assert filenames == ["model.safetensors"] def test_weight_hub_files_llm(): filenames = weight_hub_files("bigscience/bloom") assert filenames == [f"model_{i:05d}-of-00072.safetensors" for i in range(1, 73)] def test_weight_hub_files_empty(): with pytest.raises(EntryNotFoundError): weight_hub_files("bigscience/bloom", extension=".errors") def test_download_weights(): model_id = "bigscience/bloom-560m" filenames = weight_hub_files(model_id) files = download_weights(filenames, model_id) local_files = weight_files("bigscience/bloom-560m") assert files == local_files def test_weight_files_revision_error(): with pytest.raises(RevisionNotFoundError): weight_files("bigscience/bloom-560m", revision="error") def test_weight_files_not_cached_error(fresh_cache): with pytest.raises(LocalEntryNotFoundError): weight_files("bert-base-uncased")
text-generation-inference/server/tests/utils/test_hub.py/0
{ "file_path": "text-generation-inference/server/tests/utils/test_hub.py", "repo_id": "text-generation-inference", "token_count": 1250 }
import torch from text_generation_server.layers.attention.kv_cache import KVCache, KVScales from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.models.globals import ( ATTENTION, BLOCK_SIZE, ) from text_generation_server.layers.attention import Seqlen from typing import Optional major, minor = torch.cuda.get_device_capability() is_sm75 = major == 7 and minor == 5 _PARTITION_SIZE = 512 def paged_attention( query: torch.Tensor, kv_cache: KVCache, kv_head_mapping: torch.Tensor, softmax_scale: float, block_tables: torch.Tensor, seqlen: Seqlen, max_s: int, *, kv_scales: KVScales, softcap: Optional[float] = None, ): # Adapted from: https://github.com/vllm-project/vllm/blob/f8a1e39fae05ca610be8d5a78be9d40f5274e5fc/vllm/model_executor/layers/attention.py # Copyright 2023 The vLLM team. All rights # reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # value_cache => [num_blocks, num_heads, head_size, block_size] # block_size = value_cache.shape[3] block_size = BLOCK_SIZE num_seqs, num_heads, head_size = query.shape max_num_partitions = (max_s + _PARTITION_SIZE - 1) // _PARTITION_SIZE can_scale = kv_cache.can_scale(kv_scales) # NOTE(woosuk): We use a simple heuristic to decide whether to use # PagedAttention V1 or V2. If the number of partitions is 1, we use # V1 to avoid the overhead of reduction. Also, if the number of # sequences or heads is large, we use V1 since there is enough work # to parallelize. if ATTENTION == "flashinfer": from text_generation_server.layers.attention.flashinfer import decode_state return decode_state.get().forward( query, paged_kv_cache=(kv_cache.key, kv_cache.value), logits_soft_cap=softcap, sm_scale=softmax_scale, k_scale=kv_scales.key_scale_cpu if can_scale else 1.0, v_scale=kv_scales.value_scale_cpu if can_scale else 1.0, ) elif ATTENTION == "flashdecoding": max_q = 1 max_k = max_s import flash_attn_2_cuda # TODO fixme when flash contains the fix. # Number of splits is not correctly handled # by the current path # https://github.com/Dao-AILab/flash-attention/blob/320fb59487658f033f56711efd3d61b7c7a6f8f3/csrc/flash_attn/flash_api.cpp#L577 # This fails becuase we're using causal, therefore window_right is set to 0 and the split logic is never applied. 
if softcap is None: softcap = 0.0 out = flash_attn_2_cuda.varlen_fwd( query, kv_cache.key, kv_cache.value, None, seqlen.cu_seqlen_q, seqlen.cu_seqlen_k, None, # pad_k None, block_tables, None, max_q, max_k, 0.0, # dropout softmax_scale, False, # zero_tensors True, # causal -1, # Window_left -1, # Window right softcap, False, # return softmax None, # generator ) return out[0] else: if softcap is not None: raise RuntimeError("Paged attention doesn't support softcapping") input_lengths = seqlen.input_lengths + seqlen.cache_lengths import attention_kernels out = torch.empty_like(query) kv_cache_dtype = "fp8" if kv_cache.dtype == torch.float8_e4m3fn else "auto" use_v1 = max_s <= 8192 and ( max_num_partitions == 1 or num_seqs * num_heads > 512 ) if use_v1: attention_kernels.paged_attention_v1( out, query, kv_cache.key, kv_cache.value, kv_cache.key.shape[1], softmax_scale, block_tables, input_lengths, block_size, max_s, None, kv_cache_dtype, kv_scales.key_scale_cpu, kv_scales.value_scale_cpu, ) else: # Run PagedAttention V2. assert _PARTITION_SIZE % block_size == 0 tmp_output = torch.empty( size=(num_seqs, num_heads, max_num_partitions, head_size), dtype=out.dtype, device=out.device, ) exp_sums = torch.empty( size=(num_seqs, num_heads, max_num_partitions), dtype=torch.float32, device=out.device, ) max_logits = torch.empty_like(exp_sums) attention_kernels.paged_attention_v2( out, exp_sums, max_logits, tmp_output, query, kv_cache.key, kv_cache.value, kv_cache.key.shape[1], softmax_scale, block_tables, input_lengths, block_size, max_s, None, kv_cache_dtype, kv_scales.key_scale_cpu, kv_scales.value_scale_cpu, ) return out try: is_ampere_or_newer = major >= 8 and minor >= 0 if not is_ampere_or_newer: raise ImportError("FlashAttention only supports Ampere GPUs or newer.") import flash_attn_2_cuda V2 = True except ImportError: try: import flash_attn_cuda V2 = False except ImportError as e: if major >= 8: architecture_suffix = f"-{SYSTEM}" raise ImportError( "Flash Attention V2 is not installed.\n" "Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) " f"or install flash attention v2 with `cd server && make install install-flash-attention-v2{architecture_suffix}`" ) elif is_sm75: raise ImportError( "Flash Attention is not installed.\n" "Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) " "or install flash attention with `cd server && make install install-flash-attention`" ) from e else: raise ImportError( f"GPU with CUDA capability {major} {minor} is not supported" ) from e if ATTENTION == "flashdecoding" and not V2: raise ValueError("Flash decoding requires Flash Attention V2") SUPPORTS_WINDOWING = V2 def attention( *, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, kv_cache: KVCache, kv_scales: KVScales, seqlen: Seqlen, block_tables: torch.Tensor, softmax_scale: float, window_size_left: int = -1, causal: bool = True, softcap: Optional[float] = None, ): can_scale = kv_cache.can_scale(kv_scales) if ATTENTION == "flashinfer": from text_generation_server.layers.attention.flashinfer import ( prefill_with_paged_kv_state, ) if softcap is None: softcap = 0.0 return prefill_with_paged_kv_state.get().forward( query, causal=causal, paged_kv_cache=(kv_cache.key, kv_cache.value), logits_soft_cap=softcap, sm_scale=softmax_scale, k_scale=kv_scales.key_scale_cpu if can_scale else 1.0, v_scale=kv_scales.value_scale_cpu if can_scale else 1.0, ) # If we are using flashdecoding or paged, we always use flash-attn for # the prefill. 
We have to branch on whether we use flash-attn v1 or v2. elif V2: out = torch.empty_like(query) if window_size_left <= 0 and window_size_left != -1: raise ValueError("`window_size_left` must be > 0 or -1") if softcap is None: softcap = 0.0 return flash_attn_2_cuda.varlen_fwd( query, # flashdecoding: pass the KV caches, paged: pass the KV. kv_cache.key if ATTENTION == "flashdecoding" else key, kv_cache.value if ATTENTION == "flashdecoding" else value, out, seqlen.cu_seqlen_q, seqlen.cu_seqlen_k, None, None, block_tables if ATTENTION == "flashdecoding" else None, None, seqlen.max_q, seqlen.max_k, 0.0, softmax_scale, False, causal, window_size_left, 0, softcap, False, None, )[0] else: if window_size_left != -1: raise NotImplementedError( "window_size_left is only available with flash attn v2" ) if softcap is not None: raise NotImplementedError("softcap is not available in flash attn v1") # Flash attention v1 requires q, k and v to have the same number of heads if key.shape[1] != query.shape[1]: # MQA expand if key.shape[1] == 1: key = key.expand(-1, query.shape[1], -1) # Grouped attention reshape else: original_shape = key.shape key = ( key.unsqueeze(2) .expand(-1, -1, query.shape[1] // key.shape[1], -1) .reshape(original_shape[0], -1, original_shape[2]) ) if value.shape[1] != query.shape[1]: # MQA expand if value.shape[1] == 1: value = value.expand(-1, query.shape[1], -1) # Grouped attention reshape else: original_shape = value.shape value = ( value.unsqueeze(2) .expand(-1, -1, query.shape[1] // value.shape[1], -1) .reshape(original_shape[0], -1, original_shape[2]) ) out = torch.empty_like(query) flash_attn_cuda.fwd( query, key, value, out, seqlen.cu_seqlen_q, seqlen.cu_seqlen_q, seqlen.max_q, seqlen.max_k, 0.0, softmax_scale, False, causal, False, 0, None, ) return out __all__ = [ "SUPPORTS_WINDOWING", "attention", "paged_attention", ]
text-generation-inference/server/text_generation_server/layers/attention/cuda.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/attention/cuda.py", "repo_id": "text-generation-inference", "token_count": 5771 }
from typing import List, Union import torch from compressed_tensors.quantization import QuantizationArgs, QuantizationType from text_generation_server.layers.marlin.marlin import GPTQMarlin24Weight from text_generation_server.utils.weights import Weights, WeightsLoader class WNA16Int24Loader(WeightsLoader): """ Loader for W4A16/W8A16 INT 2:4 sparsity compressed-tensors checkpoints. """ def __init__(self, weight_args: QuantizationArgs): super().__init__() if weight_args.type != QuantizationType.INT: raise ValueError( f"{type(self).__name__} only supports wNa8 int checkpoints" ) if weight_args.strategy == "group" and weight_args.group_size is None: raise ValueError("`group_size` must be set when `actorder` is `group`") self.bits = weight_args.num_bits self.group_size = weight_args.group_size def __str__(self) -> str: quantization_type = f"W{self.bits}A16 2:4 sparsity" return f"{self.__class__.__name__} ({quantization_type})" def get_weights(self, weights: Weights, prefix: str): """ Get weights at the given prefix and apply without tensor paralllism. """ weight_packed = weights.get_tensor(f"{prefix}.weight_packed") meta = weights.get_tensor(f"{prefix}.meta") scale_packed = weights.get_tensor(f"{prefix}.scale_packed") return GPTQMarlin24Weight( weight_packed=weight_packed, meta=meta, scale_packed=scale_packed, bits=self.bits, ) def get_weights_col_packed( self, weights: Weights, prefix: str, block_sizes: Union[int, List[int]], ): weight_packed = weights.get_packed_sharded( f"{prefix}.weight_packed", dim=1, block_sizes=block_sizes ) meta = weights.get_packed_sharded( f"{prefix}.meta", dim=1, block_sizes=block_sizes ) scale_packed = weights.get_packed_sharded( f"{prefix}.scale_packed", dim=1, block_sizes=block_sizes ) return GPTQMarlin24Weight( weight_packed=weight_packed, meta=meta, scale_packed=scale_packed, bits=self.bits, ) def get_multi_weights_col(self, weights: Weights, prefixes: List[str], dim: int): weight_packed = torch.cat( [weights.get_sharded(f"{p}.weight_packed", dim=1) for p in prefixes], dim=1 ) meta = torch.cat( [weights.get_sharded(f"{p}.meta", dim=1) for p in prefixes], dim=1 ) scale_packed = torch.cat( [weights.get_sharded(f"{p}.scale_packed", dim=1) for p in prefixes], dim=1 ) return GPTQMarlin24Weight( weight_packed=weight_packed, meta=meta, scale_packed=scale_packed, bits=self.bits, ) def get_weights_row(self, weights: Weights, prefix: str): weight_packed = weights.get_sharded(f"{prefix}.weight_packed", dim=0) meta = weights.get_sharded(f"{prefix}.meta", dim=0) if self.group_size is None: scale_packed = weights.get_tensor(f"{prefix}.scale_packed") else: scale_packed = weights.get_sharded(f"{prefix}.scale_packed", dim=0) return GPTQMarlin24Weight( weight_packed=weight_packed, meta=meta, scale_packed=scale_packed, bits=self.bits, )
text-generation-inference/server/text_generation_server/layers/compressed_tensors/wna16_int_24.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/compressed_tensors/wna16_int_24.py", "repo_id": "text-generation-inference", "token_count": 1629 }
from text_generation_server.layers.marlin.fp8 import GPTQMarlinFP8Linear from text_generation_server.layers.marlin.gptq import ( GPTQMarlinWeightsLoader, can_use_gptq_marlin, repack_gptq_for_marlin, ) from text_generation_server.layers.marlin.marlin import MarlinWeightsLoader __all__ = [ "GPTQMarlinFP8Linear", "GPTQMarlinWeightsLoader", "MarlinWeightsLoader", "can_use_gptq_marlin", "repack_gptq_for_marlin", ]
text-generation-inference/server/text_generation_server/layers/marlin/__init__.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/marlin/__init__.py", "repo_id": "text-generation-inference", "token_count": 195 }
import torch import torch.distributed from typing import Optional, Type from transformers import ( PreTrainedTokenizerBase, ) from text_generation_server.models import CausalLM from text_generation_server.models.causal_lm import CausalLMBatch from text_generation_server.pb import generate_pb2 class BloomCausalLMBatch(CausalLMBatch): @classmethod def from_pb( cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, dtype: torch.dtype, device: torch.device, ) -> "CausalLMBatch": batch = super().from_pb(pb=pb, tokenizer=tokenizer, dtype=dtype, device=device) batch.keys_head_dim_last = False return batch class BLOOMSharded(CausalLM): @property def batch_type(self) -> Type[CausalLMBatch]: return BloomCausalLMBatch def forward( self, input_ids, attention_mask, position_ids, past_key_values: Optional = None ): outputs, speculative_logits = self.model.forward( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=True, ) logits = outputs.logits return logits, speculative_logits, outputs.past_key_values
text-generation-inference/server/text_generation_server/models/bloom.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/bloom.py", "repo_id": "text-generation-inference", "token_count": 543 }
# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.modeling_utils import PreTrainedModel from transformers.models.gpt_neox import GPTNeoXConfig as TransformersGPTNeoXConfig from typing import Optional, List, Tuple from text_generation_server.layers.attention import ( paged_attention, attention, Seqlen, ) from text_generation_server.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, get_linear, ) from text_generation_server.layers.attention.kv_cache import get_kv_scales from text_generation_server.layers.layernorm import ( FastLayerNorm, ) from text_generation_server.layers.rotary import ( PositionRotaryEmbedding, ) from text_generation_server.utils.weights import UnquantizedWeight class GPTNeoXConfig(TransformersGPTNeoXConfig): attribute_map = { "num_key_value_heads": "num_attention_heads", } def load_row(config, prefix: str, weights, bias: bool): weight = weights.get_weights_row(prefix) if bias and weights.process_group.rank() == 0: # Rank is only on the first rank process bias = weights.get_tensor(f"{prefix}.bias") else: bias = None linear = get_linear(weight, bias) if config.use_parallel_residual: return linear else: return TensorParallelRowLinear(linear, process_group=weights.process_group) def load_qkv(config, prefix: str, weights, num_heads, head_size, hidden_size): weight = weights.get_multi_weights_col([prefix], dim=0) if isinstance(weight, UnquantizedWeight): # Only on non quantized versions weight.weight = ( weight.weight.view( num_heads, 3, head_size, hidden_size, ) .permute(1, 0, 2, 3) .reshape(-1, hidden_size) ) bias = weights.get_sharded(f"{prefix}.bias", dim=0) bias = bias.view(num_heads, 3, head_size).permute(1, 0, 2).reshape(-1) linear = get_linear(weight, bias) if config.use_parallel_residual: return linear else: return TensorParallelColumnLinear(linear) class FlashNeoxAttention(torch.nn.Module): def __init__(self, config, prefix, weights): super().__init__() num_heads = config.num_attention_heads hidden_size = config.hidden_size self.num_heads = num_heads self.hidden_size = hidden_size self.head_size = hidden_size // num_heads self.rotary_dim = int(config.rotary_pct * self.head_size) if self.num_heads % weights.process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.num_heads = self.num_heads // weights.process_group.size() self.rotary_emb = PositionRotaryEmbedding.static( config=config, 
dim=self.rotary_dim, base=config.rotary_emb_base, device=weights.device, ) self.softmax_scale = self.head_size ** (-0.5) self.query_key_value = load_qkv( config, prefix=f"{prefix}.query_key_value", weights=weights, num_heads=self.num_heads, head_size=self.head_size, hidden_size=self.hidden_size, ) self.kv_scales = get_kv_scales(weights, f"{prefix}") self.dense = load_row( config, prefix=f"{prefix}.dense", weights=weights, bias=True ) self.kv_head_mapping = torch.arange( 0, self.num_heads, dtype=torch.int32, device=weights.device ) def forward( self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, ): qkv = self.query_key_value(hidden_states) qkv = qkv.view(-1, 3, self.num_heads, self.head_size) # Compute rotary embeddings on rotary_ndims query_rot = qkv[:, 0][..., : self.rotary_dim] query_pass = qkv[:, 0][..., self.rotary_dim :] key_rot = qkv[:, 1][..., : self.rotary_dim] key_pass = qkv[:, 1][..., self.rotary_dim :] # Inplace rotary self.rotary_emb(query_rot, key_rot, cos, sin) qkv[:, 0] = torch.cat((query_rot, query_pass), dim=-1) qkv[:, 1] = torch.cat((key_rot, key_pass), dim=-1) kv_cache.store( key=qkv[:, 1], value=qkv[:, 2], slots=slots, kv_scales=self.kv_scales, ) # Prefill if cu_seqlen_prefill is not None: # flash attention attn_output = attention( query=qkv[:, 0], key=qkv[:, 1], value=qkv[:, 2], kv_cache=kv_cache, kv_scales=self.kv_scales, seqlen=seqlen, block_tables=block_tables, softmax_scale=self.softmax_scale, ) # Decode else: attn_output = paged_attention( qkv[:, 0], kv_cache, self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s, kv_scales=self.kv_scales, ) return self.dense(attn_output.view(-1, self.num_heads * self.head_size)) class FlashMLP(nn.Module): def __init__(self, config, prefix, weights): super().__init__() act = config.hidden_act self.act = ( ACT2FN[act] if "gelu" not in act else lambda x: torch.nn.functional.gelu( x, approximate=( "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none" ), ) ) self.dense_h_to_4h = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.dense_h_to_4h", weights=weights, bias=True ) self.dense_4h_to_h = load_row( config, prefix=f"{prefix}.dense_4h_to_h", weights=weights, bias=True ) def forward(self, hidden_states): hidden_states = self.dense_h_to_4h(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dense_4h_to_h(hidden_states) return hidden_states class FlashNeoXLayer(nn.Module): def __init__(self, layer_id, config, weights): super().__init__() layer_norm_eps = config.layer_norm_eps prefix = f"gpt_neox.layers.{layer_id}" self.use_parallel_residual = config.use_parallel_residual self.input_layernorm = FastLayerNorm.load( prefix=f"{prefix}.input_layernorm", weights=weights, eps=layer_norm_eps ) self.post_attention_layernorm = FastLayerNorm.load( prefix=f"{prefix}.post_attention_layernorm", weights=weights, eps=layer_norm_eps, ) self.attention = FlashNeoxAttention( config, prefix=f"{prefix}.attention", weights=weights ) self.mlp = FlashMLP(config, prefix=f"{prefix}.mlp", weights=weights) self.process_group = weights.process_group def forward( self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, ): if self.use_parallel_residual: ln1_hidden_states, _ = self.input_layernorm(hidden_states) attn_output = self.attention( ln1_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, ) ln2_hidden_states, _ = self.post_attention_layernorm(hidden_states) mlp_output = 
self.mlp(ln2_hidden_states) intermediate = mlp_output + attn_output if self.process_group.size() > 1: torch.distributed.all_reduce(intermediate, group=self.process_group) return intermediate + hidden_states, None else: hidden_states, residual = self.input_layernorm(hidden_states, residual) hidden_states = self.attention( hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, ) hidden_states, residual = self.post_attention_layernorm( hidden_states, residual ) mlp_output = self.mlp(hidden_states) return mlp_output, residual class FlashGPTNeoXPreTrainedModel(PreTrainedModel): config_class = GPTNeoXConfig base_model_prefix = "gpt_neox" supports_gradient_checkpointing = False _no_split_modules = None class FlashGPTNeoXModel(FlashGPTNeoXPreTrainedModel): def __init__(self, prefix: str, config, weights): super().__init__(config) self.config = config self.embed_in = TensorParallelEmbedding( prefix=f"{prefix}.embed_in", weights=weights ) self.layers = nn.ModuleList( [ FlashNeoXLayer(layer_id, config, weights) for layer_id in range(config.num_hidden_layers) ] ) self.final_layer_norm = FastLayerNorm.load( prefix=f"{prefix}.final_layer_norm", weights=weights, eps=config.layer_norm_eps, ) self.gradient_checkpointing = False self.head_size = self.layers[0].attention.head_size self.num_heads = self.layers[0].attention.num_heads def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, ) -> torch.Tensor: hidden_states = self.embed_in(input_ids) # Get rotary cos and sin for this forward # Avoid to index in each layer cos, sin = self.layers[0].attention.rotary_emb.get_cos_sin( position_ids, max_s, hidden_states.dtype ) residual = None for i, layer in enumerate(self.layers): hidden_states, residual = layer( hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s, ) hidden_states, _ = self.final_layer_norm(hidden_states, residual) return hidden_states class FlashGPTNeoXForCausalLM(FlashGPTNeoXPreTrainedModel): def __init__(self, prefix, config, weights): super().__init__(config) if not prefix: prefix = "gpt_neox" else: prefix = f"{prefix}.gpt_neox" self.gpt_neox = FlashGPTNeoXModel(prefix, config, weights) self.embed_out = SpeculativeHead.load( config, prefix="embed_out", weights=weights ) def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor] = None, adapter_data: Optional[torch.Tensor] = None, ) -> torch.Tensor: hidden_states = self.gpt_neox( input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits = self.embed_out(hidden_states) return logits
text-generation-inference/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py", "repo_id": "text-generation-inference", "token_count": 6670 }
# coding=utf-8 # Copyright 2024 the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Llava-NeXT model.""" from typing import List, Optional, Tuple import torch import torch.utils.checkpoint from torch import nn from transformers.activations import ACT2FN from transformers.image_processing_utils import select_best_resolution from text_generation_server.layers.attention import Seqlen from text_generation_server.models.custom_modeling.vlm import ( load_text_model, load_vision_model, ) from text_generation_server.layers import ( TensorParallelColumnLinear, TensorParallelRowLinear, ) def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size): """ Calculate the shape of the image patch grid after the preprocessing for images of any resolution. Args: image_size (`tuple`): The size of the input image in the format (height, width). grid_pinpoints (`List`): A list containing possible resolutions. Each item in the list should be a tuple or list of the form `(height, width)`. patch_size (`int`): The size of each image patch. Returns: tuple: The shape of the image patch grid in the format (height, width). """ if not isinstance(grid_pinpoints, list): raise ValueError("grid_pinpoints should be a list of tuples or lists") height, width = select_best_resolution(image_size, grid_pinpoints) return height // patch_size, width // patch_size def unpad_image(tensor, original_size): """ Unpads a PyTorch tensor of a padded and resized image. Args: tensor (`torch.Tensor`): The image tensor, assumed to be of shape (num_channels, height, width). original_size (`tuple`): The original size of the image (height, width). Returns: `torch.Tensor`: The unpadded image tensor. 
""" original_height, original_width = original_size current_height, current_width = tensor.shape[1:] original_aspect_ratio = original_width / original_height current_aspect_ratio = current_width / current_height if original_aspect_ratio > current_aspect_ratio: scale_factor = current_width / original_width new_height = int(original_height * scale_factor) padding = (current_height - new_height) // 2 unpadded_tensor = tensor[:, padding : current_height - padding, :] else: scale_factor = current_height / original_height new_width = int(original_width * scale_factor) padding = (current_width - new_width) // 2 unpadded_tensor = tensor[:, :, padding : current_width - padding] return unpadded_tensor # Copied from transformers.models.llava.modeling_llava.LlavaMultiModalProjector with Llava->LlavaNext class LlavaNextMultiModalProjector(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.linear_1 = TensorParallelColumnLinear.load( prefix=f"{prefix}.linear_1", config=config, weights=weights, bias=True ) self.act = ACT2FN[config.projector_hidden_act] self.linear_2 = TensorParallelRowLinear.load( prefix=f"{prefix}.linear_2", config=config, weights=weights, bias=True ) def forward(self, image_features): hidden_states = self.linear_1(image_features) hidden_states = self.act(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states class LlavaNextForConditionalGeneration(nn.Module): def __init__(self, prefix, config, weights): super().__init__() config.vision_config.quantize = config.quantize vision_config = config.vision_config # Instead of selecting in hidden_states[-2]. # Instead compute only the n -2 + 1 layers and don't pool if config.vision_feature_layer < 0: vision_config.num_hidden_layers += config.vision_feature_layer + 1 else: vision_config.num_hidden_layers = config.vision_feature_layer + 1 self.vision_tower = load_vision_model( prefix="vision_tower" if not prefix else f"{prefix}.vision_tower", config=config.vision_config, weights=weights, ) self.multi_modal_projector = LlavaNextMultiModalProjector( prefix="multi_modal_projector", config=config, weights=weights ) self.image_newline = weights.get_tensor("image_newline") self.vocab_size = config.text_config.vocab_size self.config = config config.text_config.quantize = config.quantize config.text_config.speculator = config.speculator self.text_model = load_text_model( prefix="language_model" if not prefix else f"{prefix}.language_model", config=config.text_config, weights=weights, ) self.pad_token_id = ( config.pad_token_id if config.pad_token_id is not None else -1 ) def _merge_input_ids_with_image_features( self, input_ids: torch.Tensor, inputs_embeds: torch.Tensor, image_features: torch.Tensor, ): """In place merges in vision_embeddings with inputs_embeds.""" mask = input_ids == self.config.image_token_index # Let's pray we have enabled enough slots ! try: inputs_embeds[mask] = image_features.view(-1, image_features.shape[-1]) except Exception as e: raise RuntimeError( f"Cannot fill images right now. If error happens at warmup, make sure you have enough `--max-input-tokens` to handle images. 
If error happens at regular runtime, please fill in an issue: {e}" ) return inputs_embeds def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor] = None, pixel_values: torch.FloatTensor = None, # Unused for this model pixel_attention_mask=None, image_sizes: Optional[torch.LongTensor] = None, adapter_data: Optional[torch.Tensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, ): inputs_embeds = self.text_model.embed_tokens(input_ids) if pixel_values is not None and len(pixel_values) > 0: # num_special_image_tokens = (input_ids == self.config.image_token_index).sum() # assert num_special_image_tokens == len(pixel_values), f"Received {num_special_image_tokens} for {len(pixel_values)} images, this is invalid" # 1. Extract the input embeddings # 2. Merge text and images num_images, num_patches, channels, height, width = pixel_values.shape pixel_values = pixel_values.view( num_images * num_patches, channels, height, width ) image_features = self.vision_tower(pixel_values) # selected_image_feature = image_features.hidden_states[self.config.vision_feature_layer] # Already done within the clip model selected_image_feature = image_features.last_hidden_state if self.config.vision_feature_select_strategy == "default": selected_image_feature = selected_image_feature[:, 1:] elif self.config.vision_feature_select_strategy == "full": selected_image_feature = selected_image_feature else: raise RuntimeError( f"Strategy `{self.config.vision_feature_select_strategy}` is not supported/valid." ) image_features = self.multi_modal_projector(selected_image_feature) # split up image_features for each of the individual images # hence we get a list of image_features, each of shape (5, num_patches, hidden_size) # if we assume each image has 5 image features (base image + 4 patches) split_sizes = [num_patches] * num_images image_features = torch.split(image_features, split_sizes, dim=0) # NOTE we only support multimodal_patch_merge_type == "spatial_unpad" height = width = ( self.config.vision_config.image_size // self.config.vision_config.patch_size ) new_image_features = [] for image_idx, image_feature in enumerate(image_features): if image_feature.shape[0] > 1: base_image_feature = image_feature[0] image_feature = image_feature[1:] if height * width != base_image_feature.shape[0]: raise ValueError( "The number of patches is not consistent with the image size." 
) # Dimensions are intentionally swapped to be bug-compatible with # upstream: https://github.com/LLaVA-VL/LLaVA-NeXT/issues/59 num_patch_width, num_patch_height = get_anyres_image_grid_shape( image_sizes[image_idx], self.config.image_grid_pinpoints, self.config.vision_config.image_size, ) image_feature = image_feature.view( num_patch_height, num_patch_width, height, width, -1 ) image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous() image_feature = image_feature.flatten(1, 2).flatten(2, 3) image_feature = unpad_image(image_feature, image_sizes[image_idx]) image_feature = torch.cat( ( image_feature, self.image_newline[:, None, None].expand( *image_feature.shape[:-1], 1 ), ), dim=-1, ) image_feature = image_feature.flatten(1, 2).transpose(0, 1) image_feature = torch.cat( (base_image_feature, image_feature), dim=0 ) else: image_feature = image_feature[0] image_feature = torch.cat( (image_feature, self.image_newline[None]), dim=0 ) new_image_features.append(image_feature) image_features = torch.stack(new_image_features, dim=0) inputs_embeds = self._merge_input_ids_with_image_features( input_ids, inputs_embeds, image_features ) hidden_states = self.text_model.model( inputs_embeds=inputs_embeds, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, kv_cache=kv_cache, block_tables=block_tables, slots=slots, seqlen=seqlen, max_s=max_s, true_max_s=max_s, prefill_cache_indices=None, adapter_data=adapter_data, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits, speculative_logits = self.text_model.lm_head(hidden_states) return logits, speculative_logits
text-generation-inference/server/text_generation_server/models/custom_modeling/llava_next.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/llava_next.py", "repo_id": "text-generation-inference", "token_count": 5448 }
import torch import triton import triton.language as tl from loguru import logger from typing import List, Optional from torch.utils._triton import has_triton as has_triton_torch from text_generation_server.utils.import_utils import ( SYSTEM, ) from text_generation_server.utils.log import log_master _HAS_TRITON: Optional[bool] = None def has_triton(): global _HAS_TRITON if _HAS_TRITON is None: # FIXME: it seems that has_triton_torch is bugged on RocM # For now, only accept cuda _HAS_TRITON = has_triton_torch() if SYSTEM == "cuda" else False if _HAS_TRITON: log_master(logger.info, "Using optimized Triton indexing kernels.") return _HAS_TRITON def block_tables_to_padded( max_blocks: int, cu_seqlen: torch.Tensor, block_tables: torch.Tensor, block_tables_ragged: torch.Tensor, ): def grid(meta): return ( triton.cdiv(max_blocks, meta["BLOCK_SIZE"]), len(block_tables), ) triton_block_tables_to_padded[grid]( cu_seqlen, block_tables, block_tables_ragged, block_tables.shape[1], BLOCK_SIZE=256, ) def block_tables_to_ragged( *, block_tables: torch.Tensor, input_lengths: List[int], cache_lengths: List[int], input_lengths_tensor: torch.Tensor, cache_lengths_tensor: torch.Tensor, max_current_length: int, ) -> torch.Tensor: """Convert block table to ragged format compatible with FlashInfer.""" assert len(input_lengths) == len(cache_lengths) total_len = sum(input_lengths) + sum(cache_lengths) block_tables_ragged = torch.empty( total_len, dtype=torch.int32, device=block_tables.device ) if has_triton(): cu_seqlen = input_lengths_tensor.new_zeros(input_lengths_tensor.shape[0] + 1) torch.cumsum( input_lengths_tensor + cache_lengths_tensor, out=cu_seqlen[1:], dim=0 ) def grid(meta): return ( triton.cdiv(max_current_length, meta["BLOCK_SIZE"]), len(cache_lengths), ) triton_block_tables_to_ragged[grid]( cu_seqlen, block_tables, block_tables_ragged, block_tables.shape[1], BLOCK_SIZE=256, ) else: offset = 0 for i, (input_length, cache_length) in enumerate( zip(input_lengths, cache_lengths) ): seq_len = cache_length + input_length block_tables_ragged[offset : offset + seq_len] = block_tables[i][:seq_len] offset += seq_len return block_tables_ragged def copy_next_input_ids_inplace( max_next_input_ids: int, all_input_ids: torch.Tensor, cache_lengths: torch.Tensor, input_lengths: torch.Tensor, prompt_lengths: torch.Tensor, next_input_ids: torch.Tensor, cu_accepted_ids: torch.Tensor, ): def grid(meta): return ( triton.cdiv(max_next_input_ids, meta["BLOCK_SIZE"]), len(all_input_ids), ) triton_copy_next_input_ids_inplace[grid]( all_input_ids, cache_lengths, input_lengths, prompt_lengths, next_input_ids, cu_accepted_ids, all_input_ids.shape[1], BLOCK_SIZE=16, ) def prepare_position_slot_ids( max_input_length: int, cache_lengths: torch.Tensor, cu_seqlen: torch.Tensor, cu_slots: torch.Tensor, position_ids: torch.Tensor, slot_indices: torch.Tensor, ): def grid(meta): return ( triton.cdiv(max_input_length, meta["BLOCK_SIZE"]), len(cache_lengths), ) triton_prepare_position_slot_ids[grid]( cache_lengths, cu_seqlen, cu_slots, position_ids, slot_indices, BLOCK_SIZE=256 ) def slots_filtering( max_slots: int, slots: torch.Tensor, filtered_slots: torch.Tensor, cu_slots: torch.Tensor, slots_start: torch.Tensor, ): def grid(meta): return ( triton.cdiv(max_slots, meta["BLOCK_SIZE"]), len(slots_start), ) triton_slots_filtering[grid]( slots, filtered_slots, slots_start, cu_slots, BLOCK_SIZE=256 ) @triton.jit def triton_slots_filtering( # Inputs slots_ptr, filtered_slots_ptr, slots_start_ptr, cu_slots_ptr, # Const values BLOCK_SIZE: 
"tl.constexpr", ): # Position in block_tables_ragged.numel() / BLOCK_SIZE pid = tl.program_id(axis=0) # Position in batch bid = tl.program_id(axis=1) block_start = pid * BLOCK_SIZE block_arange = block_start + tl.arange(0, BLOCK_SIZE) filter_start = tl.load(slots_start_ptr + bid) slot_start = tl.load(cu_slots_ptr + bid) slot_end = tl.load(cu_slots_ptr + bid + 1) mask = (slot_start + block_arange) < slot_end slots = tl.load(slots_ptr + filter_start + block_arange, mask=mask) tl.store(filtered_slots_ptr + slot_start + block_arange, slots, mask=mask) @triton.jit def triton_block_tables_to_padded( # Inputs cu_seqlen_ptr, # Outputs block_tables_ptr, block_tables_ragged_ptr, # Stride stride_block_tables, # Const values BLOCK_SIZE: "tl.constexpr", ): # Position in block_tables_ragged.numel() / BLOCK_SIZE pid = tl.program_id(axis=0) # Position in batch bid = tl.program_id(axis=1) block_start = pid * BLOCK_SIZE block_arange = block_start + tl.arange(0, BLOCK_SIZE) seq_start = tl.load(cu_seqlen_ptr + bid) seq_end = tl.load(cu_seqlen_ptr + bid + 1) mask = (seq_start + block_arange) < seq_end blocks = tl.load(block_tables_ragged_ptr + seq_start + block_arange, mask=mask) tl.store( block_tables_ptr + bid * stride_block_tables + block_arange, blocks, mask=mask ) @triton.jit def triton_block_tables_to_ragged( # Inputs cu_seqlen_ptr, # Outputs block_tables_ptr, block_tables_ragged_ptr, # Stride stride_block_tables, # Const values BLOCK_SIZE: "tl.constexpr", ): # Position in block_tables_ragged.numel() / BLOCK_SIZE pid = tl.program_id(axis=0) # Position in batch bid = tl.program_id(axis=1) block_start = pid * BLOCK_SIZE block_arange = block_start + tl.arange(0, BLOCK_SIZE) seq_start = tl.load(cu_seqlen_ptr + bid) seq_end = tl.load(cu_seqlen_ptr + bid + 1) mask = (seq_start + block_arange) < seq_end blocks = tl.load( block_tables_ptr + bid * stride_block_tables + block_arange, mask=mask ) tl.store(block_tables_ragged_ptr + seq_start + block_arange, blocks, mask=mask) @triton.jit def triton_copy_next_input_ids_inplace( # Inputs all_input_ids_ptr, cache_lengths_ptr, input_lengths_ptr, prompt_lengths_ptr, next_input_ids_ptr, cu_accepted_ids_ptr, # Stride stride_all_input_ids, # Const values BLOCK_SIZE: "tl.constexpr", ): # Position in max_accepted_ids / BLOCK_SIZE pid = tl.program_id(axis=0) # Position in batch bid = tl.program_id(axis=1) block_start = pid * BLOCK_SIZE block_arange = block_start + tl.arange(0, BLOCK_SIZE) # Used for correctly indexing in all_input_ids cache_length = tl.load(cache_lengths_ptr + bid) input_length = tl.load(input_lengths_ptr + bid) prompt_length = tl.load(prompt_lengths_ptr + bid) # Start/End of next_input_ids for this request next_input_ids_start = tl.load(cu_accepted_ids_ptr + bid) next_input_ids_end = tl.load(cu_accepted_ids_ptr + bid + 1) # Mask values out of range mask = (next_input_ids_start + block_arange) < next_input_ids_end # Mask values for request still prefilling decode_mask = (cache_length + input_length + block_arange) >= prompt_length mask = mask & decode_mask # Load this request next input ids next_input_ids = tl.load( next_input_ids_ptr + next_input_ids_start + block_arange, mask=mask ) # Store in all_input_ids, since it is a 2D tensor, apply stride * bid tl.store( all_input_ids_ptr + stride_all_input_ids * bid + cache_length + input_length + block_arange, next_input_ids, mask=mask, ) @triton.jit def triton_prepare_position_slot_ids( # Inputs cache_lengths_ptr, cu_seqlen_ptr, cu_slots_ptr, # Outputs position_ids_ptr, slot_indices_ptr, # Const values BLOCK_SIZE: 
"tl.constexpr", ): # Position in max_input_length / BLOCK_SIZE pid = tl.program_id(axis=0) # Position in batch bid = tl.program_id(axis=1) block_start = pid * BLOCK_SIZE block_arange = block_start + tl.arange(0, BLOCK_SIZE) cache_length = tl.load(cache_lengths_ptr + bid) seq_start = tl.load(cu_seqlen_ptr + bid) seq_end = tl.load(cu_seqlen_ptr + bid + 1) slot_start = tl.load(cu_slots_ptr + bid) mask = (seq_start + block_arange) < seq_end tl.store( position_ids_ptr + seq_start + block_arange, cache_length + block_arange, mask=mask, ) tl.store( slot_indices_ptr + seq_start + block_arange, slot_start + cache_length + block_arange, mask=mask, )
text-generation-inference/server/text_generation_server/models/metadata_kernels.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/metadata_kernels.py", "repo_id": "text-generation-inference", "token_count": 4276 }
import time import os from datetime import timedelta from loguru import logger from pathlib import Path from typing import Optional, List from huggingface_hub import file_download, hf_api, HfApi, hf_hub_download from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE from huggingface_hub.utils import ( LocalEntryNotFoundError, EntryNotFoundError, RevisionNotFoundError, # noqa # Import here to ease try/except in other part of the lib ) WEIGHTS_CACHE_OVERRIDE = os.getenv("WEIGHTS_CACHE_OVERRIDE", None) HF_HUB_OFFLINE = os.environ.get("HF_HUB_OFFLINE", "0").lower() in ["true", "1", "yes"] def _cached_weight_files( model_id: str, revision: Optional[str], extension: str ) -> List[str]: """Guess weight files from the cached revision snapshot directory""" d = _get_cached_revision_directory(model_id, revision) if not d: return [] filenames = _weight_files_from_dir(d, extension) return filenames def _weight_hub_files_from_model_info( info: hf_api.ModelInfo, extension: str ) -> List[str]: return [ s.rfilename for s in info.siblings if s.rfilename.endswith(extension) and len(s.rfilename.split("/")) == 1 and "arguments" not in s.rfilename and "args" not in s.rfilename and "training" not in s.rfilename ] def _weight_files_from_dir(d: Path, extension: str) -> List[str]: # os.walk: do not iterate, just scan for depth 1, not recursively # see _weight_hub_files_from_model_info, that's also what is # done there with the len(s.rfilename.split("/")) == 1 condition root, _, files = next(os.walk(str(d))) filenames = [ os.path.join(root, f) for f in files if f.endswith(extension) and "arguments" not in f and "args" not in f and "training" not in f ] return filenames def _get_cached_revision_directory( model_id: str, revision: Optional[str] ) -> Optional[Path]: if revision is None: revision = "main" repo_cache = Path(HUGGINGFACE_HUB_CACHE) / Path( file_download.repo_folder_name(repo_id=model_id, repo_type="model") ) if not repo_cache.is_dir(): # No cache for this model return None refs_dir = repo_cache / "refs" snapshots_dir = repo_cache / "snapshots" # Resolve refs (for instance to convert main to the associated commit sha) if refs_dir.is_dir(): revision_file = refs_dir / revision if revision_file.exists(): with revision_file.open() as f: revision = f.read() # Check if revision folder exists if not snapshots_dir.exists(): return None cached_shas = os.listdir(snapshots_dir) if revision not in cached_shas: # No cache for this revision and we won't try to return a random revision return None return snapshots_dir / revision def weight_hub_files( model_id: str, revision: Optional[str] = None, extension: str = ".safetensors" ) -> List[str]: """Get the weights filenames on the hub""" api = HfApi() if HF_HUB_OFFLINE: filenames = _cached_weight_files(model_id, revision, extension) else: # Online case, fetch model info from the Hub info = api.model_info(model_id, revision=revision) filenames = _weight_hub_files_from_model_info(info, extension) if not filenames: raise EntryNotFoundError( f"No {extension} weights found for model {model_id} and revision {revision}.", None, ) return filenames def try_to_load_from_cache( model_id: str, revision: Optional[str], filename: str ) -> Optional[Path]: """Try to load a file from the Hugging Face cache""" d = _get_cached_revision_directory(model_id, revision) if not d: return None # Check if file exists in cache cached_file = d / filename return cached_file if cached_file.is_file() else None def weight_files( model_id: str, revision: Optional[str] = None, extension: str = 
".safetensors" ) -> List[Path]: """Get the local files""" # Local model d = Path(model_id) if d.exists() and d.is_dir(): local_files = _weight_files_from_dir(d, extension) if not local_files: raise FileNotFoundError( f"No local weights found in {model_id} with extension {extension}" ) return [Path(f) for f in local_files] try: filenames = weight_hub_files(model_id, revision, extension) except EntryNotFoundError as e: if extension != ".safetensors": raise e # Try to see if there are pytorch weights pt_filenames = weight_hub_files(model_id, revision, extension=".bin") # Change pytorch extension to safetensors extension # It is possible that we have safetensors weights locally even though they are not on the # hub if we converted weights locally without pushing them filenames = [ f"{Path(f).stem.lstrip('pytorch_')}.safetensors" for f in pt_filenames ] if WEIGHTS_CACHE_OVERRIDE is not None: files = [] for filename in filenames: p = Path(WEIGHTS_CACHE_OVERRIDE) / filename if not p.exists(): raise FileNotFoundError( f"File {p} not found in {WEIGHTS_CACHE_OVERRIDE}." ) files.append(p) return files files = [] for filename in filenames: cache_file = try_to_load_from_cache( model_id, revision=revision, filename=filename ) if cache_file is None: raise LocalEntryNotFoundError( f"File {filename} of model {model_id} not found in " f"{os.getenv('HUGGINGFACE_HUB_CACHE', 'the local cache')}. " f"Please run `text-generation-server download-weights {model_id}` first." ) files.append(cache_file) return files def download_weights( filenames: List[str], model_id: str, revision: Optional[str] = None ) -> List[Path]: """Download the safetensors files from the hub""" def download_file(fname, tries=5, backoff: int = 5): local_file = try_to_load_from_cache(model_id, revision, fname) if local_file is not None: logger.info(f"File {fname} already present in cache.") return Path(local_file) for idx in range(tries): try: logger.info(f"Download file: {fname}") stime = time.time() local_file = hf_hub_download( filename=fname, repo_id=model_id, revision=revision, local_files_only=HF_HUB_OFFLINE, ) logger.info( f"Downloaded {local_file} in {timedelta(seconds=int(time.time() - stime))}." ) return Path(local_file) except Exception as e: if idx + 1 == tries: raise e logger.error(e) logger.info(f"Retrying in {backoff} seconds") time.sleep(backoff) logger.info(f"Retry {idx + 1}/{tries - 1}") # We do this instead of using tqdm because we want to parse the logs with the launcher start_time = time.time() files = [] for i, filename in enumerate(filenames): file = download_file(filename) elapsed = timedelta(seconds=int(time.time() - start_time)) remaining = len(filenames) - (i + 1) eta = (elapsed / (i + 1)) * remaining if remaining > 0 else 0 logger.info(f"Download: [{i + 1}/{len(filenames)}] -- ETA: {eta}") files.append(file) return files
text-generation-inference/server/text_generation_server/utils/hub.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/hub.py", "repo_id": "text-generation-inference", "token_count": 3419 }
/* eslint-disable @typescript-eslint/no-explicit-any */ import { bertProcessing, byteLevelProcessing, robertaProcessing, sequenceProcessing, templateProcessing } from '../../' describe('bertProcessing', () => { it('instantiates correctly with only two parameters', () => { const processor = bertProcessing(['sep', 1], ['cls', 2]) expect(processor.constructor.name).toEqual('Processor') }) it('throws if only one argument is provided', () => { expect(() => (bertProcessing as any)(['sep', 1])).toThrow('Given napi value is not an array') }) it('throws if arguments are malformed', () => { expect(() => (bertProcessing as any)(['sep', '1'], ['cls', '2'])).toThrow( 'Failed to convert napi value String into rust type `u32`', ) expect(() => (bertProcessing as any)(['sep'], ['cls'])).toThrow('Array length < 2') }) }) describe('byteLevelProcessing', () => { it('instantiates correctly without any parameter', () => { const processor = byteLevelProcessing() expect(processor.constructor.name).toEqual('Processor') }) it('accepts `undefined` as first parameter', () => { expect(byteLevelProcessing(undefined)).toBeDefined() }) it('accepts `boolean` as first parameter', () => { expect(byteLevelProcessing(true)).toBeDefined() }) }) describe('robertaProcessing', () => { it('instantiates correctly with only two parameters', () => { const processor = robertaProcessing(['sep', 1], ['cls', 2]) expect(processor.constructor.name).toEqual('Processor') }) it('accepts `undefined` as third and fourth parameters', () => { expect(robertaProcessing(['sep', 1], ['cls', 2], undefined, undefined)).toBeDefined() }) it('accepts `boolean` as third and fourth parameter', () => { expect(robertaProcessing(['sep', 1], ['cls', 2], true, true)).toBeDefined() }) }) describe('templateProcessing', () => { it('instantiates correctly with only a single template', () => { const processor = templateProcessing('$A $A') expect(processor.constructor.name).toEqual('Processor') }) it('throws if special tokens are missing', () => { expect(() => templateProcessing('[CLS] $A [SEP]')).toThrow('Missing SpecialToken(s) with id(s)') }) it('instantiates correctly with both templates', () => { const processor = templateProcessing('[CLS] $A [SEP]', '[CLS] $A [SEP] $B:1 [SEP]:1', [ ['[CLS]', 1], ['[SEP]', 2], ]) expect(processor.constructor.name).toEqual('Processor') }) }) describe('sequenceProcessing', () => { it('accepts `PostProcessor[]` as first parameter', () => { const template = templateProcessing('[CLS] $A [SEP]', '[CLS] $A [SEP] $B:1 [SEP]:1', [ ['[CLS]', 1], ['[SEP]', 2], ]) const bytelevel = byteLevelProcessing(true) expect(sequenceProcessing([bytelevel, template])).toBeDefined() }) })
tokenizers/bindings/node/lib/bindings/post-processors.test.ts/0
{ "file_path": "tokenizers/bindings/node/lib/bindings/post-processors.test.ts", "repo_id": "tokenizers", "token_count": 1022 }
use serde::de::Deserializer; use serde::ser::Serializer; use serde::{Deserialize, Serialize}; use std::sync::{Arc, RwLock}; pub fn serialize<S, T>(val: &Option<Arc<RwLock<T>>>, s: S) -> Result<S::Ok, S::Error> where S: Serializer, T: Serialize, { T::serialize(&*(val.clone().unwrap()).read().unwrap(), s) } pub fn deserialize<'de, D, T>(d: D) -> Result<Option<Arc<RwLock<T>>>, D::Error> where D: Deserializer<'de>, T: Deserialize<'de>, { Ok(Some(Arc::new(RwLock::new(T::deserialize(d)?)))) }
tokenizers/bindings/node/src/arc_rwlock_serde.rs/0
{ "file_path": "tokenizers/bindings/node/src/arc_rwlock_serde.rs", "repo_id": "tokenizers", "token_count": 220 }
from enum import Enum from typing import List, Tuple, Union Offsets = Tuple[int, int] TextInputSequence = str """A :obj:`str` that represents an input sequence """ PreTokenizedInputSequence = Union[List[str], Tuple[str]] """A pre-tokenized input sequence. Can be one of: - A :obj:`List` of :obj:`str` - A :obj:`Tuple` of :obj:`str` """ TextEncodeInput = Union[ TextInputSequence, Tuple[TextInputSequence, TextInputSequence], List[TextInputSequence], ] """Represents a textual input for encoding. Can be either: - A single sequence: :data:`~tokenizers.TextInputSequence` - A pair of sequences: - A :obj:`Tuple` of :data:`~tokenizers.TextInputSequence` - Or a :obj:`List` of :data:`~tokenizers.TextInputSequence` of size 2 """ PreTokenizedEncodeInput = Union[ PreTokenizedInputSequence, Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence], List[PreTokenizedInputSequence], ] """Represents a pre-tokenized input for encoding. Can be either: - A single sequence: :data:`~tokenizers.PreTokenizedInputSequence` - A pair of sequences: - A :obj:`Tuple` of :data:`~tokenizers.PreTokenizedInputSequence` - Or a :obj:`List` of :data:`~tokenizers.PreTokenizedInputSequence` of size 2 """ InputSequence = Union[TextInputSequence, PreTokenizedInputSequence] """Represents all the possible types of input sequences for encoding. Can be: - When ``is_pretokenized=False``: :data:`~TextInputSequence` - When ``is_pretokenized=True``: :data:`~PreTokenizedInputSequence` """ EncodeInput = Union[TextEncodeInput, PreTokenizedEncodeInput] """Represents all the possible types of input for encoding. Can be: - When ``is_pretokenized=False``: :data:`~TextEncodeInput` - When ``is_pretokenized=True``: :data:`~PreTokenizedEncodeInput` """ class OffsetReferential(Enum): ORIGINAL = "original" NORMALIZED = "normalized" class OffsetType(Enum): BYTE = "byte" CHAR = "char" class SplitDelimiterBehavior(Enum): REMOVED = "removed" ISOLATED = "isolated" MERGED_WITH_PREVIOUS = "merged_with_previous" MERGED_WITH_NEXT = "merged_with_next" CONTIGUOUS = "contiguous" from .tokenizers import ( AddedToken, Encoding, NormalizedString, PreTokenizedString, Regex, Token, Tokenizer, decoders, models, normalizers, pre_tokenizers, processors, trainers, __version__, ) from .implementations import ( BertWordPieceTokenizer, ByteLevelBPETokenizer, CharBPETokenizer, SentencePieceBPETokenizer, SentencePieceUnigramTokenizer, )
tokenizers/bindings/python/py_src/tokenizers/__init__.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/__init__.py", "repo_id": "tokenizers", "token_count": 984 }
# Generated content DO NOT EDIT class PreTokenizer: """ Base class for all pre-tokenizers This class is not supposed to be instantiated directly. Instead, any implementation of a PreTokenizer will return an instance of this class when instantiated. """ def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class BertPreTokenizer(PreTokenizer): """ BertPreTokenizer This pre-tokenizer splits tokens on spaces, and also on punctuation. Each occurrence of a punctuation character will be treated separately. """ def __init__(self): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class ByteLevel(PreTokenizer): """ ByteLevel PreTokenizer This pre-tokenizer takes care of replacing all bytes of the given string with a corresponding representation, as well as splitting into words. Args: add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to add a space to the first word if there isn't already one. This lets us treat `hello` exactly like `say hello`. use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`): Set this to :obj:`False` to prevent this `pre_tokenizer` from using the GPT2 specific regexp for spliting on whitespace. 
""" def __init__(self, add_prefix_space=True, use_regex=True): pass @staticmethod def alphabet(): """ Returns the alphabet used by this PreTokenizer. Since the ByteLevel works as its name suggests, at the byte level, it encodes each byte value to a unique visible character. This means that there is a total of 256 different characters composing this alphabet. Returns: :obj:`List[str]`: A list of characters that compose the alphabet """ pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class CharDelimiterSplit(PreTokenizer): """ This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)` Args: delimiter: str: The delimiter char that will be used to split input """ def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. 
If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Digits(PreTokenizer): """ This pre-tokenizer simply splits using the digits in separate tokens Args: individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`): If set to True, digits will each be separated as follows:: "Call 123 please" -> "Call ", "1", "2", "3", " please" If set to False, digits will grouped as follows:: "Call 123 please" -> "Call ", "123", " please" """ def __init__(self, individual_digits=False): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Metaspace(PreTokenizer): """ Metaspace pre-tokenizer This pre-tokenizer replaces any whitespace by the provided replacement character. It then tries to split on these spaces. Args: replacement (:obj:`str`, `optional`, defaults to :obj:`▁`): The replacement character. Must be exactly one character. By default we use the `▁` (U+2581) meta symbol (Same as in SentencePiece). prepend_scheme (:obj:`str`, `optional`, defaults to :obj:`"always"`): Whether to add a space to the first word if there isn't already one. This lets us treat `hello` exactly like `say hello`. Choices: "always", "never", "first". First means the space is only added on the first token (relevant when special tokens are used or other pre_tokenizer are used). """ def __init__(self, replacement="_", prepend_scheme="always", split=True): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. 
If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Punctuation(PreTokenizer): """ This pre-tokenizer simply splits on punctuation as individual characters. Args: behavior (:class:`~tokenizers.SplitDelimiterBehavior`): The behavior to use when splitting. Choices: "removed", "isolated" (default), "merged_with_previous", "merged_with_next", "contiguous" """ def __init__(self, behavior="isolated"): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Sequence(PreTokenizer): """ This pre-tokenizer composes other pre_tokenizers and applies them in sequence """ def __init__(self, pretokenizers): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. 
If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Split(PreTokenizer): """ Split PreTokenizer This versatile pre-tokenizer splits using the provided pattern and according to the provided behavior. The pattern can be inverted by making use of the invert flag. Args: pattern (:obj:`str` or :class:`~tokenizers.Regex`): A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`. If you want to use a regex pattern, it has to be wrapped around a `tokenizers.Regex`, otherwise we consider is as a string pattern. For example `pattern="|"` means you want to split on `|` (imagine a csv file for example), while `pattern=tokenizers.Regex("1|2")` means you split on either '1' or '2'. behavior (:class:`~tokenizers.SplitDelimiterBehavior`): The behavior to use when splitting. Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", "contiguous" invert (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to invert the pattern. """ def __init__(self, pattern, behavior, invert=False): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class UnicodeScripts(PreTokenizer): """ This pre-tokenizer splits on characters that belong to different language family It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt Actually Hiragana and Katakana are fused with Han, and 0x30FC is Han too. This mimicks SentencePiece Unigram implementation. """ def __init__(self): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. 
If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Whitespace(PreTokenizer): """ This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+` """ def __init__(self): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class WhitespaceSplit(PreTokenizer): """ This pre-tokenizer simply splits on the whitespace. Works like `.split()` """ def __init__(self): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass
tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.pyi/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.pyi", "repo_id": "tokenizers", "token_count": 9661 }
use pyo3::exceptions; use pyo3::prelude::*; use pyo3::type_object::PyTypeInfo; use std::ffi::CString; use std::fmt::{Display, Formatter, Result as FmtResult}; use tokenizers::tokenizer::Result; #[derive(Debug)] pub struct PyError(pub String); impl PyError { #[allow(dead_code)] pub fn from(s: &str) -> Self { PyError(String::from(s)) } pub fn into_pyerr<T: PyTypeInfo>(self) -> PyErr { PyErr::new::<T, _>(format!("{}", self)) } } impl Display for PyError { fn fmt(&self, fmt: &mut Formatter) -> FmtResult { write!(fmt, "{}", self.0) } } impl std::error::Error for PyError {} pub struct ToPyResult<T>(pub Result<T>); impl<T> From<ToPyResult<T>> for PyResult<T> { fn from(v: ToPyResult<T>) -> Self { v.0.map_err(|e| exceptions::PyException::new_err(format!("{}", e))) } } impl<T> ToPyResult<T> { pub fn into_py(self) -> PyResult<T> { self.into() } } pub(crate) fn deprecation_warning(py: Python<'_>, version: &str, message: &str) -> PyResult<()> { let deprecation_warning = py.import("builtins")?.getattr("DeprecationWarning")?; let full_message = format!("Deprecated in {}: {}", version, message); pyo3::PyErr::warn(py, &deprecation_warning, &CString::new(full_message)?, 0) }
tokenizers/bindings/python/src/error.rs/0
{ "file_path": "tokenizers/bindings/python/src/error.rs", "repo_id": "tokenizers", "token_count": 548 }
from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, processors from tokenizers.implementations import BaseTokenizer class TestBaseTokenizer: def test_get_set_components(self): toki = Tokenizer(models.BPE()) toki.normalizer = normalizers.NFC() toki.pre_tokenizer = pre_tokenizers.ByteLevel() toki.post_processor = processors.BertProcessing(("A", 0), ("B", 1)) toki.decoder = decoders.ByteLevel() tokenizer = BaseTokenizer(toki) assert isinstance(tokenizer.model, models.BPE) assert isinstance(tokenizer.normalizer, normalizers.NFC) assert isinstance(tokenizer.pre_tokenizer, pre_tokenizers.ByteLevel) assert isinstance(tokenizer.post_processor, processors.BertProcessing) assert isinstance(tokenizer.decoder, decoders.ByteLevel) tokenizer.model = models.Unigram() assert isinstance(tokenizer.model, models.Unigram) tokenizer.normalizer = normalizers.NFD() assert isinstance(tokenizer.normalizer, normalizers.NFD) tokenizer.pre_tokenizer = pre_tokenizers.Whitespace() assert isinstance(tokenizer.pre_tokenizer, pre_tokenizers.Whitespace) tokenizer.post_processor = processors.ByteLevel() assert isinstance(tokenizer.post_processor, processors.ByteLevel) tokenizer.decoder = decoders.WordPiece() assert isinstance(tokenizer.decoder, decoders.WordPiece)
tokenizers/bindings/python/tests/implementations/test_base_tokenizer.py/0
{ "file_path": "tokenizers/bindings/python/tests/implementations/test_base_tokenizer.py", "repo_id": "tokenizers", "token_count": 550 }
# Normalizers <tokenizerslangcontent> <python> ## BertNormalizer [[autodoc]] tokenizers.normalizers.BertNormalizer ## Lowercase [[autodoc]] tokenizers.normalizers.Lowercase ## NFC [[autodoc]] tokenizers.normalizers.NFC ## NFD [[autodoc]] tokenizers.normalizers.NFD ## NFKC [[autodoc]] tokenizers.normalizers.NFKC ## NFKD [[autodoc]] tokenizers.normalizers.NFKD ## Nmt [[autodoc]] tokenizers.normalizers.Nmt ## Normalizer [[autodoc]] tokenizers.normalizers.Normalizer ## Precompiled [[autodoc]] tokenizers.normalizers.Precompiled ## Replace [[autodoc]] tokenizers.normalizers.Replace ## Sequence [[autodoc]] tokenizers.normalizers.Sequence ## Strip [[autodoc]] tokenizers.normalizers.Strip ## StripAccents [[autodoc]] tokenizers.normalizers.StripAccents </python> <rust> The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website. </rust> <node> The node API has not been documented yet. </node> </tokenizerslangcontent>
tokenizers/docs/source-doc-builder/api/normalizers.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/normalizers.mdx", "repo_id": "tokenizers", "token_count": 350 }
🤗 Tokenizers is tested on Python 3.5+. You should install 🤗 Tokenizers in a `virtual environment <https://docs.python.org/3/library/venv.html>`_. If you're unfamiliar with Python virtual environments, check out the `user guide <https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/>`__. Create a virtual environment with the version of Python you're going to use and activate it. Installation with pip ---------------------------------------------------------------------------------------------------- 🤗 Tokenizers can be installed using pip as follows:: pip install tokenizers Installation from sources ---------------------------------------------------------------------------------------------------- To use this method, you need to have the Rust language installed. You can follow `the official guide <https://www.rust-lang.org/learn/get-started>`__ for more information. If you are using a unix based OS, the installation should be as simple as running:: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh Or you can easily update it with the following command:: rustup update Once rust is installed, we can start retrieving the sources for 🤗 Tokenizers:: git clone https://github.com/huggingface/tokenizers Then we go into the python bindings folder:: cd tokenizers/bindings/python At this point you should have your `virtual environment`_ already activated. In order to compile 🤗 Tokenizers, you need to:: pip install -e .
tokenizers/docs/source/installation/python.inc/0
{ "file_path": "tokenizers/docs/source/installation/python.inc", "repo_id": "tokenizers", "token_count": 383 }
#[macro_use] extern crate criterion; use criterion::Criterion; use std::collections::HashMap; use std::fs::read_to_string; use std::time::{Duration, Instant}; use tokenizers::models::unigram::Unigram; use tokenizers::models::unigram::UnigramTrainer; pub fn bench_train(c: &mut Criterion) { let trainer = UnigramTrainer::builder() .show_progress(false) .unk_token(Some("<UNK>".into())) .build() .unwrap(); let mut model = Unigram::default(); let content = read_to_string("data/small.txt").unwrap(); let mut word_counts = HashMap::new(); content.split_whitespace().for_each(|word| { // This is important for the test of char vs u8 let word = format!("▁{word}"); *word_counts.entry(word).or_insert(0) += 1; }); let sentences: Vec<_> = word_counts .iter() .map(|(s, i)| (s.to_owned(), *i)) .collect(); c.bench_function("Unigram Train vocabulary (small)", |b| { b.iter_custom(|iters| { let mut duration = Duration::new(0, 0); for _i in 0..iters { let sentences = sentences.clone(); let start = Instant::now(); trainer.do_train(sentences, &mut model).unwrap(); duration = duration.checked_add(start.elapsed()).unwrap(); } duration }) }); let content = read_to_string("data/big.txt").unwrap(); // creating `medium` data, which is the first 25% of `data/big.txt` let content = String::from(&content[..(content.len() as f64 * 0.25) as usize]); let mut word_counts = HashMap::new(); content.split_whitespace().for_each(|word| { // This is important for the test of char vs u8 let word = format!("▁{word}"); *word_counts.entry(word).or_insert(0) += 1; }); let sentences: Vec<_> = word_counts .iter() .map(|(s, i)| (s.to_owned(), *i)) .collect(); c.bench_function("Unigram Train vocabulary (medium)", |b| { b.iter_custom(|iters| { let mut duration = Duration::new(0, 0); for _i in 0..iters { let sentences = sentences.clone(); let start = Instant::now(); trainer.do_train(sentences, &mut model).unwrap(); duration = duration.checked_add(start.elapsed()).unwrap(); } duration }) }); } criterion_group! { name = benches_train; config = Criterion::default().sample_size(10); targets = bench_train } criterion_main!(benches_train);
tokenizers/tokenizers/benches/unigram_benchmark.rs/0
{ "file_path": "tokenizers/tokenizers/benches/unigram_benchmark.rs", "repo_id": "tokenizers", "token_count": 1172 }
<!DOCTYPE html> <html> <head> <meta charset="utf-8"> <title>Hello wasm-pack!</title> </head> <body> <noscript>This page contains webassembly and javascript content, please enable javascript in your browser.</noscript> <script src="./bootstrap.js"></script> </body> </html>
tokenizers/tokenizers/examples/unstable_wasm/www/index.html/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/index.html", "repo_id": "tokenizers", "token_count": 110 }
use super::{super::OrderedVocabIter, trainer::BpeTrainer, Error, Pair, Word}; use crate::tokenizer::{Model, Result, Token}; use crate::utils::cache::{Cache, DEFAULT_CACHE_CAPACITY, MAX_LENGTH}; use crate::utils::iter::ResultShunt; use serde_json::Value; use std::borrow::Cow; use std::{ collections::HashMap, fs::File, io::prelude::*, io::{BufRead, BufReader}, path::{Path, PathBuf}, }; pub type Vocab = HashMap<String, u32>; type VocabR = HashMap<u32, String>; pub type MergeMap = HashMap<Pair, (u32, u32)>; pub type Merges = Vec<(String, String)>; struct Config { files: Option<(String, String)>, vocab: Vocab, merges: Merges, cache_capacity: usize, dropout: Option<f32>, unk_token: Option<String>, continuing_subword_prefix: Option<String>, end_of_word_suffix: Option<String>, fuse_unk: bool, byte_fallback: bool, ignore_merges: bool, } /// A `BpeBuilder` can be used to create a `BPE` model with a custom configuration. pub struct BpeBuilder { config: Config, } impl Default for BpeBuilder { fn default() -> Self { Self { config: Config { files: None, vocab: HashMap::new(), merges: vec![], cache_capacity: DEFAULT_CACHE_CAPACITY, dropout: None, unk_token: None, continuing_subword_prefix: None, end_of_word_suffix: None, fuse_unk: false, byte_fallback: false, ignore_merges: false, }, } } } impl BpeBuilder { /// Constructs a new `BpeBuilder`. pub fn new() -> Self { Self::default() } /// Set the input files. #[must_use] pub fn files(mut self, vocab: String, merges: String) -> Self { self.config.files = Some((vocab, merges)); self } /// Set the vocab (token -> ID) and merges mappings. #[must_use] pub fn vocab_and_merges(mut self, vocab: Vocab, merges: Merges) -> Self { self.config.vocab = vocab; self.config.merges = merges; self } /// Set the cache's capacity. Set to 0 if you want to disable caching. #[must_use] pub fn cache_capacity(mut self, capacity: usize) -> Self { self.config.cache_capacity = capacity; self } /// Use [dropout](https://arxiv.org/abs/1910.13267) with the model. #[must_use] pub fn dropout(mut self, dropout: f32) -> Self { self.config.dropout = Some(dropout); self } /// Set the `UNK` token for the vocab. #[must_use] pub fn unk_token(mut self, unk_token: String) -> Self { self.config.unk_token = Some(unk_token); self } /// Set the `continuing_subword_prefix` option. #[must_use] pub fn continuing_subword_prefix(mut self, prefix: String) -> Self { self.config.continuing_subword_prefix = Some(prefix); self } /// Set the `end_of_word_suffix` option. #[must_use] pub fn end_of_word_suffix(mut self, prefix: String) -> Self { self.config.end_of_word_suffix = Some(prefix); self } /// Set the `fuse_unk` option. #[must_use] pub fn fuse_unk(mut self, fuse_unk: bool) -> Self { self.config.fuse_unk = fuse_unk; self } /// Set the `byte_fallback` option. #[must_use] pub fn byte_fallback(mut self, byte_fallback: bool) -> Self { self.config.byte_fallback = byte_fallback; self } /// Set the `ignore_merges` option. #[must_use] pub fn ignore_merges(mut self, ignore_merges: bool) -> Self { self.config.ignore_merges = ignore_merges; self } /// Returns a `BPE` model that uses the `BpeBuilder`'s configuration. pub fn build(mut self) -> Result<BPE> { // Validate dropout. 
if let Some(p) = self.config.dropout { if !(0.0..=1.0).contains(&p) { return Err(Error::InvalidDropout.into()); } } // Read files if necessary if let Some((vocab, merges)) = self.config.files { let (v, m) = BPE::read_file(&vocab, &merges)?; self.config.vocab = v; self.config.merges = m; } let vocab_r = self .config .vocab .iter() .map(|(key, val)| (*val, key.to_owned())) .collect(); let cache = match self.config.cache_capacity { 0 => None, capacity => Some(Cache::new(capacity)), }; let vocab = self.config.vocab; let prefix_len = if let Some(prefix) = &self.config.continuing_subword_prefix { prefix.len() } else { 0 }; let merge_map: MergeMap = self .config .merges .into_iter() .enumerate() .map(|(i, (a, b))| -> Result<(Pair, (u32, u32))> { let a_id = vocab .get(&a) .ok_or_else(|| Error::MergeTokenOutOfVocabulary(a.to_owned()))?; let b_id = vocab .get(&b) .ok_or_else(|| Error::MergeTokenOutOfVocabulary(b.to_owned()))?; let new_token = format!("{}{}", a, &b[prefix_len..]); let new_id = vocab .get(&new_token) .ok_or(Error::MergeTokenOutOfVocabulary(new_token))?; Ok(((*a_id, *b_id), (i as u32, *new_id))) }) .collect::<Result<MergeMap>>()?; // merges.insert(pair, (rank as u32, *new_id)); Ok(BPE { vocab, vocab_r, merges: merge_map, cache, dropout: self.config.dropout, unk_token: self.config.unk_token, continuing_subword_prefix: self.config.continuing_subword_prefix, end_of_word_suffix: self.config.end_of_word_suffix, fuse_unk: self.config.fuse_unk, byte_fallback: self.config.byte_fallback, ignore_merges: self.config.ignore_merges, }) } } /// A [Byte Pair Encoding](https://www.aclweb.org/anthology/P16-1162/) model. #[derive(PartialEq)] pub struct BPE { /// The vocabulary assigns a number to each token. pub(crate) vocab: Vocab, /// Reversed vocabulary, to rebuild sentences. pub(crate) vocab_r: VocabR, /// Contains the mapping between Pairs and their (rank, new_id). pub(crate) merges: MergeMap, /// Contains the cache for optimizing the encoding step. cache: Option<Cache<String, Word>>, /// Dropout probability for merges. 0.0 = no dropout is the default. At 1.0, tokenization will /// perform no merges, so the result will just be characters. pub dropout: Option<f32>, /// The unknown token to be used when we encounter an unknown char pub unk_token: Option<String>, /// An optional prefix to use on any subword that exist only behind another one pub continuing_subword_prefix: Option<String>, /// An optional suffix to caracterize and end-of-word subword pub end_of_word_suffix: Option<String>, /// Do multiple unk tokens get fused pub fuse_unk: bool, /// Byte fallback from sentence pieces, instead of UNK, uses `"<0x00>"` /// for each byte in the unk token pub byte_fallback: bool, /// Whether or not to direct output words if they are part of the vocab. pub ignore_merges: bool, } impl std::fmt::Debug for BPE { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("BPE") .field("dropout", &self.dropout) .field("unk_token", &self.unk_token) .field("continuing_subword_prefix", &self.continuing_subword_prefix) .field("end_of_word_suffix", &self.end_of_word_suffix) .field("fuse_unk", &self.fuse_unk) .field("byte_fallback", &self.byte_fallback) .field("vocab", &self.vocab.len()) .field("merges", &self.merges.len()) .field("ignore_merges", &self.ignore_merges) .finish() } } impl Default for BPE { fn default() -> Self { Self::builder().build().unwrap() } } impl Clone for BPE { // `Clone` can't be derive because it's not implemented for `Cache`. 
// To keep things simple when we clone, the new BPE will start with a fresh cache. fn clone(&self) -> Self { let fresh_cache = self.cache.as_ref().map(|cache| cache.fresh()); Self { vocab: self.vocab.clone(), vocab_r: self.vocab_r.clone(), merges: self.merges.clone(), cache: fresh_cache, dropout: self.dropout, unk_token: self.unk_token.clone(), continuing_subword_prefix: self.continuing_subword_prefix.clone(), end_of_word_suffix: self.end_of_word_suffix.clone(), fuse_unk: self.fuse_unk, byte_fallback: self.byte_fallback, ignore_merges: self.ignore_merges, } } } /// Converts the merges strings (for example from `merges.txt` file) with the format /// "{pair_a} {pair_b}" into the format expected by the BPE struct pub(crate) fn convert_merges_to_hashmap<I: Iterator<Item = String>>( iter: I, _vocab: &Vocab, ) -> Result<Merges> { let mut merges = vec![]; let lines = iter.filter(|l| !l.starts_with("#version")); for (rank, line) in lines.enumerate() { let parts = line.split(' ').collect::<Vec<_>>(); if parts.len() != 2 { return Err(Error::BadMerges(rank + 1).into()); } merges.push((parts[0].to_string(), parts[1].to_string())); } Ok(merges) } impl BPE { /// Initialize a `BpeBuilder`. pub fn builder() -> BpeBuilder { BpeBuilder::new() } /// Create a new BPE model with the given vocab and merges. pub fn new(vocab: Vocab, merges: Merges) -> Self { Self::builder() .vocab_and_merges(vocab, merges) .build() .unwrap() } /// Initialize a BpeBuilder model from vocab and merges files pub fn from_file(vocab: &str, merges: &str) -> BpeBuilder { Self::builder().files(vocab.to_owned(), merges.to_owned()) } /// Read the given files to extract the vocab and merges pub fn read_file(vocab: &str, merges: &str) -> Result<(Vocab, Merges)> { // Read vocab.json let vocab_file = File::open(vocab)?; let mut vocab_file = BufReader::new(vocab_file); let mut buffer = String::new(); vocab_file.read_to_string(&mut buffer)?; let json: Value = serde_json::from_str(&buffer)?; let mut vocab = HashMap::new(); match json { Value::Object(m) => { for (token, id) in m { if let Value::Number(id) = id { let id = id.as_u64().ok_or(Error::BadVocabulary)? as u32; vocab.insert(token, id); } } } _ => return Err(Box::new(Error::BadVocabulary)), }; // Read merges file let merge_file = File::open(merges)?; let merge_file = BufReader::new(merge_file); let merges = ResultShunt::process(merge_file.lines(), |iter| { convert_merges_to_hashmap(iter, &vocab) })??; Ok((vocab, merges)) } /// Reset the cache. 
pub fn clear_cache(&self) { if let Some(ref cache) = self.cache { cache.clear() } } /// Resize the cache pub fn resize_cache(&mut self, capacity: usize) { if let Some(ref mut cache) = self.cache { cache.resize(capacity); } } pub fn get_vocab(&self) -> Vocab { self.vocab.clone() } pub fn get_unk_token(&self) -> &Option<String> { &self.unk_token } pub fn get_continuing_subword_prefix(&self) -> &Option<String> { &self.continuing_subword_prefix } fn merge_word(&self, w: &str) -> Result<Word> { let mut indices = w.char_indices().map(|(idx, _)| idx).peekable(); let mut word = Word::with_capacity(w.len()); let mut unk: Option<(u32, usize)> = None; while let Some(i) = indices.next() { let end = indices.peek(); let is_first = i == 0; let is_last = end.is_none(); let mut s = if let Some(e) = end { Cow::Borrowed(&w[i..*e]) } else { Cow::Borrowed(&w[i..]) }; let byte_len = s.len(); // Add the `continuing_subword_prefix` if relevant if !is_first { if let Some(ref prefix) = self.continuing_subword_prefix { s = format!("{prefix}{s}").into() } } // Add the `end_of_word_suffix` if relevant if is_last { if let Some(ref suffix) = self.end_of_word_suffix { s = format!("{s}{suffix}").into() } } if let Some(id) = self.vocab.get(s.as_ref()) { if let Some((unk_id, unk_len)) = unk { word.add(unk_id, unk_len); unk = None; } word.add(*id, byte_len); } else { if self.byte_fallback { let tokens: Option<Vec<_>> = s .bytes() .map(|b| -> Option<&u32> { let code = format!("<{b:#04X}>"); self.vocab.get(&code) }) .collect(); if let Some(tokens) = tokens { for t in tokens { word.add(*t, 1); } continue; } } if let Some(unk_token) = &self.unk_token { unk = match (unk, self.fuse_unk) { (Some((unk_id, unk_len)), true) => { // Fuse unk Some((unk_id, unk_len + byte_len)) } (Some((unk_id, unk_len)), false) => { // Do not fuse unk, add the previous one word.add(unk_id, unk_len); Some(( *self.vocab.get(unk_token).ok_or_else(|| { Error::UnkTokenOutOfVocabulary(unk_token.to_owned()) })?, byte_len, )) } _ => Some(( *self.vocab.get(unk_token).ok_or_else(|| { Error::UnkTokenOutOfVocabulary(unk_token.to_owned()) })?, byte_len, )), }; } } } if let Some((unk_id, unk_len)) = unk { word.add(unk_id, unk_len); } word.merge_all(&self.merges, self.dropout); Ok(word) } fn word_to_tokens<'a, 'b: 'a>(&'a self, word: &'b Word) -> impl Iterator<Item = Token> + 'a { word.get_chars_iter() .zip(word.get_offsets_iter()) .map(move |(id, offsets)| Token::new(id, self.vocab_r[&id].clone(), offsets)) } fn tokenize_with_cache(&self, sequence: &str) -> Result<Vec<Token>> { if self.ignore_merges { if let Some(id) = self.vocab.get(sequence) { return Ok(vec![Token::new( *id, sequence.to_string().clone(), (0, sequence.len()), )]); } } if let Some(ref hit) = self.cache.as_ref().and_then(|c| c.get(sequence)) { return Ok(self.word_to_tokens(hit).collect()); } let word = self.merge_word(sequence)?; let ret = self.word_to_tokens(&word).collect(); if let Some(ref cache) = self.cache { if sequence.len() < MAX_LENGTH { cache.set(sequence.to_owned(), word); } } Ok(ret) } } impl Model for BPE { type Trainer = BpeTrainer; fn get_vocab(&self) -> HashMap<String, u32> { self.vocab.clone() } fn get_vocab_size(&self) -> usize { self.vocab.len() } fn tokenize(&self, sequence: &str) -> Result<Vec<Token>> { if sequence.is_empty() { return Ok(vec![]); } if self.dropout.is_none() || self.dropout == Some(0.0) { self.tokenize_with_cache(sequence) } else { let word = self.merge_word(sequence)?; Ok(self.word_to_tokens(&word).collect()) } } fn token_to_id(&self, token: &str) -> Option<u32> { 
self.vocab.get(token).copied() } fn id_to_token(&self, id: u32) -> Option<String> { self.vocab_r.get(&id).cloned() } fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> { let vocab_file_name = match name { Some(name) => format!("{name}-vocab.json"), None => "vocab.json".to_string(), }; // Write vocab.json let vocab_path: PathBuf = [folder, Path::new(vocab_file_name.as_str())] .iter() .collect(); let mut vocab_file = File::create(&vocab_path)?; let order_vocab_iter = OrderedVocabIter::new(&self.vocab_r); let serialized = serde_json::to_string(&order_vocab_iter)?; vocab_file.write_all(serialized.as_bytes())?; // Write merges.txt let merges_file_name = match name { Some(name) => format!("{name}-merges.txt"), None => "merges.txt".to_string(), }; let merges_path: PathBuf = [folder, Path::new(merges_file_name.as_str())] .iter() .collect(); let mut merges_file = File::create(&merges_path)?; let mut merges: Vec<(&Pair, &u32)> = self .merges .iter() .map(|(pair, (rank, _))| (pair, rank)) .collect(); merges.sort_unstable_by_key(|k| *k.1); merges_file.write_all(b"#version: 0.2\n")?; merges_file.write_all( &merges .into_iter() .flat_map(|(pair, _)| { format!("{} {}\n", self.vocab_r[&pair.0], self.vocab_r[&pair.1]).into_bytes() }) .collect::<Vec<_>>()[..], )?; Ok(vec![vocab_path, merges_path]) } fn get_trainer(&self) -> BpeTrainer { BpeTrainer::default() } } #[cfg(test)] mod tests { use super::*; use tempfile::NamedTempFile; #[test] fn test_ordered_vocab_iter() { let vocab_r: VocabR = [ (0, "a".into()), (1, "b".into()), (2, "c".into()), (3, "ab".into()), ] .iter() .cloned() .collect(); let order_vocab_iter = OrderedVocabIter::new(&vocab_r); let serialized = serde_json::to_string(&order_vocab_iter).unwrap(); assert_eq!(serialized, "{\"a\":0,\"b\":1,\"c\":2,\"ab\":3}"); } #[test] fn test_unk_not_fused() { let vocab: Vocab = [("<unk>".into(), 0), ("a".into(), 1), ("b".into(), 2)] .iter() .cloned() .collect(); let bpe = BpeBuilder::default() .vocab_and_merges(vocab, vec![]) .unk_token("<unk>".to_string()) .build() .unwrap(); let tokens = bpe.tokenize("c").unwrap(); assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 1)),]); let tokens = bpe.tokenize("cc").unwrap(); assert_eq!( tokens, vec![ Token::new(0u32, "<unk>".into(), (0, 1)), Token::new(0u32, "<unk>".into(), (1, 2)), ] ); let tokens = bpe.tokenize("accb").unwrap(); assert_eq!( tokens, vec![ Token::new(1u32, "a".into(), (0, 1)), Token::new(0u32, "<unk>".into(), (1, 2)), Token::new(0u32, "<unk>".into(), (2, 3)), Token::new(2u32, "b".into(), (3, 4)), ] ); } #[test] fn test_unk_get_fused() { let vocab: Vocab = [("<unk>".into(), 0), ("a".into(), 1), ("b".into(), 2)] .iter() .cloned() .collect(); let bpe = BpeBuilder::default() .vocab_and_merges(vocab, vec![]) .unk_token("<unk>".to_string()) .fuse_unk(true) .build() .unwrap(); let tokens = bpe.tokenize("c").unwrap(); assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 1)),]); let tokens = bpe.tokenize("cc").unwrap(); assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 2)),]); let tokens = bpe.tokenize("accb").unwrap(); assert_eq!( tokens, vec![ Token::new(1u32, "a".into(), (0, 1)), Token::new(0u32, "<unk>".into(), (1, 3)), Token::new(2u32, "b".into(), (3, 4)), ] ); } #[test] // Test tokenization. With dropout set to 0 tokenization is deterministic, // so we know exactly what the result should be. // // To test this, we'll build a simple model to tokenize the word 'unrelated'. 
fn test_tokenize_with_and_without_dropout() { let vocab: Vocab = [ ("u".into(), 0), ("n".into(), 1), ("r".into(), 2), ("e".into(), 3), ("l".into(), 4), ("a".into(), 5), ("t".into(), 6), ("d".into(), 7), ("re".into(), 8), ("at".into(), 9), ("ed".into(), 10), ("un".into(), 11), ("ated".into(), 12), ("rel".into(), 13), ("related".into(), 14), ("unrelated".into(), 15), ] .iter() .cloned() .collect(); let merges: Merges = vec![ ("r".to_string(), "e".to_string()), ("a".to_string(), "t".to_string()), ("e".to_string(), "d".to_string()), ("u".to_string(), "n".to_string()), ("at".to_string(), "ed".to_string()), ("re".to_string(), "l".to_string()), ("rel".to_string(), "ated".to_string()), ("un".to_string(), "related".to_string()), ]; let mut bpe = BPE::new(vocab, merges); // With no dropout: let tokens = bpe.tokenize("unrelated").unwrap(); assert_eq!(tokens, vec![Token::new(15u32, "unrelated".into(), (0, 9))]); // With dropout = 0.0 (equivalent to dropout == none) bpe.dropout = Some(0.0); let tokens = bpe.tokenize("unrelated").unwrap(); assert_eq!(tokens, vec![Token::new(15u32, "unrelated".into(), (0, 9))]); // Now set dropout to 1.0. Result should be no merges performed. bpe.dropout = Some(1.0); let tokens = bpe.tokenize("unrelated").unwrap(); assert_eq!( tokens, vec![ Token::new(0u32, "u".into(), (0, 1)), Token::new(1u32, "n".into(), (1, 2)), Token::new(2u32, "r".into(), (2, 3)), Token::new(3u32, "e".into(), (3, 4)), Token::new(4u32, "l".into(), (4, 5)), Token::new(5u32, "a".into(), (5, 6)), Token::new(6u32, "t".into(), (6, 7)), Token::new(3u32, "e".into(), (7, 8)), Token::new(7u32, "d".into(), (8, 9)), ] ); // Now try with dropout between 0 and 1. bpe.dropout = Some(0.5); let tokens = bpe.tokenize("unrelated").unwrap(); assert!(!tokens.is_empty() && tokens.len() <= 9); } #[test] // Ensure `BPE::from_file` works as expected. fn test_bpe_from_file() { // Set up vocab file. let mut vocab_file = NamedTempFile::new().unwrap(); vocab_file .write_all(b"{\"a\": 0, \"b\": 1, \"c\": 2, \"ab\": 3}") .unwrap(); // Set up merges file. let mut merges_file = NamedTempFile::new().unwrap(); merges_file.write_all(b"#version: 0.2\na b").unwrap(); // Make sure we can instantiate a BPE model from the files. let builder = BPE::from_file( vocab_file.path().to_str().unwrap(), merges_file.path().to_str().unwrap(), ); let bpe = builder.build().unwrap(); // Check merges. assert_eq!(bpe.merges.get(&(0, 1)).unwrap(), &(0u32, 3u32)); // Check vocab. assert_eq!(bpe.vocab.get("a").unwrap(), &0u32); assert_eq!(bpe.vocab.get("b").unwrap(), &1u32); assert_eq!(bpe.vocab.get("c").unwrap(), &2u32); assert_eq!(bpe.vocab.get("ab").unwrap(), &3u32); } #[test] // Ensure BPEBuilder with dropout = 0.0 doesn't error fn test_bpe_with_dropout_0() { let bpe = BPE::builder().dropout(0.0).build().unwrap(); assert_eq!(bpe.dropout, Some(0.0)); } #[test] // Ensure `BPE::from_file` works as expected. 
fn test_bpe_with_continuing_subword_prefix() { let vocab: Vocab = vec![ ("a".to_string(), 0), ("##b".to_string(), 1), ("##c".to_string(), 2), ("ab".to_string(), 3), ("abc".to_string(), 4), ] .into_iter() .collect(); let merges = vec![ ("a".to_string(), "##b".to_string()), ("ab".to_string(), "##c".to_string()), ]; let bpe = BPE::builder() .vocab_and_merges(vocab, merges) .unk_token("[UNK]".to_string()) .continuing_subword_prefix("##".to_string()) .build() .unwrap(); let res = bpe.tokenize("ab"); assert_eq!( res.unwrap(), vec![Token { id: 3, value: "ab".to_string(), offsets: (0, 2) }] ); let res = bpe.tokenize("abc"); assert_eq!( res.unwrap(), vec![Token { id: 4, value: "abc".to_string(), offsets: (0, 3) }] ); } #[test] // Ensure `MergeTokenOutOfVocabulary` error is returned when it should be. fn test_bpe_from_file_merge_token_oov() { // Set up vocab file. let mut vocab_file = NamedTempFile::new().unwrap(); vocab_file .write_all(b"{\"a\": 0, \"b\": 1, \"c\": 2, \"ab\": 3}") .unwrap(); // Set up merges file. let mut merges_file = NamedTempFile::new().unwrap(); merges_file.write_all(b"#version: 0.2\na b\na d").unwrap(); // Ensure the result of BPE::from_file is a MergeTokenOutOfVocabulary error. match BPE::from_file( vocab_file.path().to_str().unwrap(), merges_file.path().to_str().unwrap(), ) .build() { Ok(_) => unreachable!(), Err(err) => match err.downcast_ref::<Error>() { Some(Error::MergeTokenOutOfVocabulary(token)) => { assert_eq!(*token, String::from("d")) } _ => unreachable!(), }, } } #[test] // Ensure `BadMerges` error is returned when there is an invalid line in the // merges.txt file. fn test_bpe_from_file_bad_merges() { // Set up vocab file. let mut vocab_file = NamedTempFile::new().unwrap(); vocab_file .write_all("{\"a\": 0, \"b\": 1, \"c\": 2, \"ab\": 3}".as_bytes()) .unwrap(); // Set up merges file with a bad line. let mut merges_file = NamedTempFile::new().unwrap(); merges_file.write_all(b"#version: 0.2\na b\nc").unwrap(); // Ensure the result of BPE::from_file is a BadMerges error. 
match BPE::from_file( vocab_file.path().to_str().unwrap(), merges_file.path().to_str().unwrap(), ) .build() { Ok(_) => unreachable!(), Err(err) => match err.downcast_ref::<Error>() { Some(Error::BadMerges(line)) => assert_eq!(*line, 2), _ => unreachable!(), }, } } #[test] fn test_bpe_byte_fallback() { // 0x61 == 'a' in bytes let vocab: Vocab = [("<unk>".into(), 0), ("<0x61>".into(), 1)] .iter() .cloned() .collect(); let bpe = BpeBuilder::default() .vocab_and_merges(vocab, vec![]) .unk_token("<unk>".to_string()) .byte_fallback(true) .build() .unwrap(); let tokens = bpe.tokenize("c").unwrap(); assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 1)),]); let tokens = bpe.tokenize("a").unwrap(); assert_eq!(tokens, vec![Token::new(1u32, "<0x61>".into(), (0, 1)),]); } #[test] fn test_bpe_byte_fallback_newline() { // 0x0A == '\n' in bytes let vocab: Vocab = [("<unk>".into(), 0), ("<0x0A>".into(), 1)] .iter() .cloned() .collect(); let bpe = BpeBuilder::default() .vocab_and_merges(vocab, vec![]) .unk_token("<unk>".to_string()) .byte_fallback(true) .build() .unwrap(); let tokens = bpe.tokenize("\n").unwrap(); assert_eq!(tokens, vec![Token::new(1u32, "<0x0A>".into(), (0, 1)),]); } #[test] fn test_ignore_merges() { // 0x0A == '\n' in bytes let vocab: Vocab = [ (".:.:".into(), 0), ("Ġbelirtilen".into(), 1), (".".into(), 2), (":".into(), 3), ("bel".into(), 4), ("irtilen".into(), 5), ("Ġ".into(), 6), (".:".into(), 7), ("belirtilen".into(), 8), (".:.".into(), 9), ("be".into(), 10), ("l".into(), 11), ("ir".into(), 12), ("ti".into(), 13), ("en".into(), 14), ("irtil".into(), 15), ("irti".into(), 16), ("i".into(), 17), ("r".into(), 18), ("t".into(), 19), ("b".into(), 20), ("e".into(), 21), ("n".into(), 22), ] .iter() .cloned() .collect(); let mut bpe = BpeBuilder::default() .vocab_and_merges( vocab, vec![ (".".into(), ":".into()), ("b".into(), "e".into()), ("be".into(), "l".into()), ("i".into(), "r".into()), ("t".into(), "i".into()), ("ir".into(), "ti".into()), ("e".into(), "n".into()), ("irti".into(), "l".into()), ], ) .ignore_merges(true) .build() .unwrap(); let tokens = bpe.tokenize(".:.:").unwrap(); assert_eq!(tokens, vec![Token::new(0u32, ".:.:".into(), (0, 4))]); let tokens = bpe.tokenize("Ġbelirtilen").unwrap(); assert_eq!( tokens, vec![Token::new(1u32, "Ġbelirtilen".into(), (0, 12))] ); bpe.ignore_merges = false; let tokens = bpe.tokenize(".:.:").unwrap(); assert_eq!( tokens, vec![ Token::new(7u32, ".:".into(), (0, 2)), Token::new(7u32, ".:".into(), (2, 4)) ] ); let tokens = bpe.tokenize("Ġbelirtilen").unwrap(); assert_eq!( tokens, vec![ Token { id: 6, value: "Ġ".into(), offsets: (0, 2) }, Token { id: 4, value: "bel".into(), offsets: (2, 5) }, Token { id: 15, value: "irtil".into(), offsets: (5, 10) }, Token { id: 14, value: "en".into(), offsets: (10, 12) } ] ) } }
tokenizers/tokenizers/src/models/bpe/model.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/bpe/model.rs", "repo_id": "tokenizers", "token_count": 17728 }
use super::WordPiece; use crate::models::bpe::{BpeTrainer, BpeTrainerBuilder, BPE}; use crate::tokenizer::{AddedToken, Result, Trainer}; use serde::{Deserialize, Serialize}; use std::collections::HashSet; /// A `WordPieceTrainerBuilder` can be used to create a `WordPieceTrainer` with a custom /// configuration. pub struct WordPieceTrainerBuilder { bpe_trainer_builder: BpeTrainerBuilder, } impl Default for WordPieceTrainerBuilder { fn default() -> Self { Self { bpe_trainer_builder: BpeTrainerBuilder::new().continuing_subword_prefix("##".into()), } } } impl WordPieceTrainerBuilder { /// Constructs a new `WordPieceTrainerBuilder` pub fn new() -> Self { Self::default() } /// Set the expected minimum frequency #[must_use] pub fn min_frequency(mut self, frequency: u64) -> Self { self.bpe_trainer_builder = self.bpe_trainer_builder.min_frequency(frequency); self } /// Set the vocabulary size #[must_use] pub fn vocab_size(mut self, size: usize) -> Self { self.bpe_trainer_builder = self.bpe_trainer_builder.vocab_size(size); self } /// Set whether to show progress #[must_use] pub fn show_progress(mut self, show: bool) -> Self { self.bpe_trainer_builder = self.bpe_trainer_builder.show_progress(show); self } /// Set the special tokens #[must_use] pub fn special_tokens(mut self, tokens: Vec<AddedToken>) -> Self { self.bpe_trainer_builder = self.bpe_trainer_builder.special_tokens(tokens); self } /// Set whether to limit the alphabet #[must_use] pub fn limit_alphabet(mut self, limit: usize) -> Self { self.bpe_trainer_builder = self.bpe_trainer_builder.limit_alphabet(limit); self } /// Set the initial alphabet #[must_use] pub fn initial_alphabet(mut self, alphabet: HashSet<char>) -> Self { self.bpe_trainer_builder = self.bpe_trainer_builder.initial_alphabet(alphabet); self } /// Set the continuing_subword_prefix #[must_use] pub fn continuing_subword_prefix(mut self, prefix: String) -> Self { self.bpe_trainer_builder = self.bpe_trainer_builder.continuing_subword_prefix(prefix); self } /// Set the end_of_word_suffix #[must_use] pub fn end_of_word_suffix(mut self, suffix: String) -> Self { self.bpe_trainer_builder = self.bpe_trainer_builder.end_of_word_suffix(suffix); self } /// Constructs the final BpeTrainer pub fn build(self) -> WordPieceTrainer { let bpe_trainer = self.bpe_trainer_builder.build(); WordPieceTrainer { bpe_trainer } } } /// Trains a `WordPiece` model. 
#[derive(Default, Clone, Deserialize, Serialize)] pub struct WordPieceTrainer { bpe_trainer: BpeTrainer, } impl WordPieceTrainer { pub fn min_frequency(&self) -> u64 { self.bpe_trainer.min_frequency } pub fn set_min_frequency(&mut self, freq: u64) { self.bpe_trainer.min_frequency = freq; } pub fn vocab_size(&self) -> usize { self.bpe_trainer.vocab_size } pub fn set_vocab_size(&mut self, size: usize) { self.bpe_trainer.vocab_size = size; } pub fn show_progress(&self) -> bool { self.bpe_trainer.show_progress } pub fn set_show_progress(&mut self, show_progress: bool) { self.bpe_trainer.show_progress = show_progress; } pub fn special_tokens(&self) -> &[AddedToken] { &self.bpe_trainer.special_tokens } pub fn set_special_tokens(&mut self, special_tokens: Vec<AddedToken>) { self.bpe_trainer.special_tokens = special_tokens; } pub fn limit_alphabet(&self) -> Option<usize> { self.bpe_trainer.limit_alphabet } pub fn set_limit_alphabet(&mut self, limit: Option<usize>) { self.bpe_trainer.limit_alphabet = limit; } pub fn initial_alphabet(&self) -> &HashSet<char> { &self.bpe_trainer.initial_alphabet } pub fn set_initial_alphabet(&mut self, alphabet: HashSet<char>) { self.bpe_trainer.initial_alphabet = alphabet; } pub fn continuing_subword_prefix(&self) -> &Option<String> { &self.bpe_trainer.continuing_subword_prefix } pub fn set_continuing_subword_prefix(&mut self, prefix: Option<String>) { self.bpe_trainer.continuing_subword_prefix = prefix; } pub fn end_of_word_suffix(&self) -> &Option<String> { &self.bpe_trainer.end_of_word_suffix } pub fn set_end_of_word_suffix(&mut self, suffix: Option<String>) { self.bpe_trainer.end_of_word_suffix = suffix; } pub fn builder() -> WordPieceTrainerBuilder { WordPieceTrainerBuilder::default() } pub fn train(&self, model: &mut WordPiece) -> Result<Vec<AddedToken>> { let mut bpe = BPE::default(); let special_tokens = self.bpe_trainer.train(&mut bpe)?; let new_wordpiece = WordPiece::from_bpe(&bpe); // Transfer the vocab model.vocab = new_wordpiece.vocab; model.vocab_r = new_wordpiece.vocab_r; // The continuing_subword_prefix is the only other option to be overriden by the trainer model.continuing_subword_prefix = new_wordpiece.continuing_subword_prefix; Ok(special_tokens) } } impl Trainer for WordPieceTrainer { type Model = WordPiece; fn train(&self, model: &mut WordPiece) -> Result<Vec<AddedToken>> { self.train(model) } fn should_show_progress(&self) -> bool { self.bpe_trainer.should_show_progress() } fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()> where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> Result<Vec<String>> + Sync, { self.bpe_trainer.feed(iterator, process) } }
tokenizers/tokenizers/src/models/wordpiece/trainer.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/wordpiece/trainer.rs", "repo_id": "tokenizers", "token_count": 2499 }
use serde::{Deserialize, Serialize}; use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior}; use crate::utils::macro_rules_attribute; use unicode_categories::UnicodeCategories; fn is_punc(x: char) -> bool { char::is_ascii_punctuation(&x) || x.is_punctuation() } #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct Punctuation { #[serde(default = "default_split")] pub behavior: SplitDelimiterBehavior, } fn default_split() -> SplitDelimiterBehavior { SplitDelimiterBehavior::Isolated } impl Punctuation { pub fn new(behavior: SplitDelimiterBehavior) -> Self { Self { behavior } } } impl Default for Punctuation { fn default() -> Self { Self::new(SplitDelimiterBehavior::Isolated) } } impl PreTokenizer for Punctuation { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { pretokenized.split(|_, s| s.split(is_punc, self.behavior)) } } #[cfg(test)] mod tests { use super::*; use crate::{OffsetReferential, OffsetType}; #[test] fn punctuation_basic() { let pretok = Punctuation::default(); let mut pretokenized: PreTokenizedString = "Hey friend! How are you?!?".into(); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("Hey friend", (0, 10)), ("!", (10, 11)), (" How are you", (11, 27)), ("?", (27, 28)), ("!", (28, 29)), ("?", (29, 30)), ] ); } #[test] fn deserialization() { let punctuation: Punctuation = serde_json::from_str(r#"{"type": "Punctuation"}"#).unwrap(); assert_eq!(punctuation, Punctuation::default()); assert_eq!( punctuation, Punctuation::new(SplitDelimiterBehavior::Isolated) ); } #[test] #[should_panic] fn deserialization_erroneous() { let _punctuation: Punctuation = serde_json::from_str(r#"{"type": "WhitespaceSplit"}"#).unwrap(); } }
tokenizers/tokenizers/src/pre_tokenizers/punctuation.rs/0
{ "file_path": "tokenizers/tokenizers/src/pre_tokenizers/punctuation.rs", "repo_id": "tokenizers", "token_count": 1103 }
use crate::utils::SysRegex; use crate::{Offsets, Result}; use regex::Regex; /// Pattern used to split a NormalizedString pub trait Pattern { /// Slice the given string in a list of pattern match positions, with /// a boolean indicating whether this is a match or not. /// /// This method *must* cover the whole string in its outputs, with /// contiguous ordered slices. fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>>; } impl Pattern for char { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { let is_char = |c: char| -> bool { c == *self }; is_char.find_matches(inside) } } impl Pattern for &str { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { if self.is_empty() { // If we try to find the matches with an empty string, just don't match anything return Ok(vec![((0, inside.chars().count()), false)]); } let re = Regex::new(&regex::escape(self))?; (&re).find_matches(inside) } } impl Pattern for &String { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { let s: &str = self; s.find_matches(inside) } } impl Pattern for &Regex { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { if inside.is_empty() { return Ok(vec![((0, 0), false)]); } let mut prev = 0; let mut splits = Vec::with_capacity(inside.len()); for m in self.find_iter(inside) { if prev != m.start() { splits.push(((prev, m.start()), false)); } splits.push(((m.start(), m.end()), true)); prev = m.end(); } if prev != inside.len() { splits.push(((prev, inside.len()), false)) } Ok(splits) } } impl Pattern for &SysRegex { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { if inside.is_empty() { return Ok(vec![((0, 0), false)]); } let mut prev = 0; let mut splits = Vec::with_capacity(inside.len()); for (start, end) in self.find_iter(inside) { if prev != start { splits.push(((prev, start), false)); } splits.push(((start, end), true)); prev = end; } if prev != inside.len() { splits.push(((prev, inside.len()), false)) } Ok(splits) } } impl<F> Pattern for F where F: Fn(char) -> bool, { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { if inside.is_empty() { return Ok(vec![((0, 0), false)]); } let mut last_offset = 0; let mut last_seen = 0; let mut matches = inside .char_indices() .flat_map(|(b, c)| { last_seen = b + c.len_utf8(); if self(c) { let mut events = Vec::with_capacity(2); if last_offset < b { // We need to emit what was before this match events.push(((last_offset, b), false)); } events.push(((b, b + c.len_utf8()), true)); last_offset = b + c.len_utf8(); events } else { vec![] } }) .collect::<Vec<_>>(); // Do not forget the last potential split if last_seen > last_offset { matches.push(((last_offset, last_seen), false)); } Ok(matches) } } /// Invert the `is_match` flags for the wrapped Pattern. This is usefull /// for example when we use a regex that matches words instead of a delimiter, /// and we want to match the delimiter. pub struct Invert<P: Pattern>(pub P); impl<P: Pattern> Pattern for Invert<P> { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { Ok(self .0 .find_matches(inside)? .into_iter() .map(|(offsets, flag)| (offsets, !flag)) .collect()) } } #[cfg(test)] mod tests { use super::*; use regex::Regex; macro_rules! 
do_test { ($inside: expr, $pattern: expr => @ERROR) => { assert!($pattern.find_matches($inside).is_err()); }; ($inside: expr, $pattern: expr => $result: expr) => { assert_eq!($pattern.find_matches($inside).unwrap(), $result); assert_eq!( Invert($pattern).find_matches($inside).unwrap(), $result .into_iter() .map(|v: (Offsets, bool)| (v.0, !v.1)) .collect::<Vec<_>>() ); }; } #[test] fn char() { do_test!("aba", 'a' => vec![((0, 1), true), ((1, 2), false), ((2, 3), true)]); do_test!("bbbba", 'a' => vec![((0, 4), false), ((4, 5), true)]); do_test!("aabbb", 'a' => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]); do_test!("", 'a' => vec![((0, 0), false)]); do_test!("aaa", 'b' => vec![((0, 3), false)]); } #[test] fn str() { do_test!("aba", "a" => vec![((0, 1), true), ((1, 2), false), ((2, 3), true)]); do_test!("bbbba", "a" => vec![((0, 4), false), ((4, 5), true)]); do_test!("aabbb", "a" => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]); do_test!("aabbb", "ab" => vec![((0, 1), false), ((1, 3), true), ((3, 5), false)]); do_test!("aabbab", "ab" => vec![((0, 1), false), ((1, 3), true), ((3, 4), false), ((4, 6), true)] ); do_test!("", "" => vec![((0, 0), false)]); do_test!("aaa", "" => vec![((0, 3), false)]); do_test!("aaa", "b" => vec![((0, 3), false)]); } #[test] fn functions() { let is_b = |c| c == 'b'; do_test!("aba", is_b => vec![((0, 1), false), ((1, 2), true), ((2, 3), false)]); do_test!("aaaab", is_b => vec![((0, 4), false), ((4, 5), true)]); do_test!("bbaaa", is_b => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]); do_test!("", is_b => vec![((0, 0), false)]); do_test!("aaa", is_b => vec![((0, 3), false)]); } #[test] fn regex() { let is_whitespace = Regex::new(r"\s+").unwrap(); do_test!("a b", &is_whitespace => vec![((0, 1), false), ((1, 4), true), ((4, 5), false)]); do_test!(" a b ", &is_whitespace => vec![((0, 3), true), ((3, 4), false), ((4, 7), true), ((7, 8), false), ((8, 11), true)] ); do_test!("", &is_whitespace => vec![((0, 0), false)]); do_test!("𝔾𝕠𝕠𝕕 𝕞𝕠𝕣𝕟𝕚𝕟𝕘", &is_whitespace => vec![((0, 16), false), ((16, 17), true), ((17, 45), false)] ); do_test!("aaa", &is_whitespace => vec![((0, 3), false)]); } #[test] fn sys_regex() { let is_whitespace = SysRegex::new(r"\s+").unwrap(); do_test!("a b", &is_whitespace => vec![((0, 1), false), ((1, 4), true), ((4, 5), false)]); do_test!(" a b ", &is_whitespace => vec![((0, 3), true), ((3, 4), false), ((4, 7), true), ((7, 8), false), ((8, 11), true)] ); do_test!("", &is_whitespace => vec![((0, 0), false)]); do_test!("𝔾𝕠𝕠𝕕 𝕞𝕠𝕣𝕟𝕚𝕟𝕘", &is_whitespace => vec![((0, 16), false), ((16, 17), true), ((17, 45), false)] ); do_test!("aaa", &is_whitespace => vec![((0, 3), false)]); } }
tokenizers/tokenizers/src/tokenizer/pattern.rs/0
{ "file_path": "tokenizers/tokenizers/src/tokenizer/pattern.rs", "repo_id": "tokenizers", "token_count": 3903 }
#![cfg(feature = "http")] use tokenizers::{FromPretrainedParameters, Result, Tokenizer}; #[test] fn test_from_pretrained() -> Result<()> { let tokenizer = Tokenizer::from_pretrained("bert-base-cased", None)?; let encoding = tokenizer.encode("Hey there dear friend!", false)?; assert_eq!( encoding.get_tokens(), &["Hey", "there", "dear", "friend", "!"] ); Ok(()) } #[test] fn test_from_pretrained_revision() -> Result<()> { let tokenizer = Tokenizer::from_pretrained("anthony/tokenizers-test", None)?; let encoding = tokenizer.encode("Hey there dear friend!", false)?; assert_eq!( encoding.get_tokens(), &["hey", "there", "dear", "friend", "!"] ); let tokenizer = Tokenizer::from_pretrained( "anthony/tokenizers-test", Some(FromPretrainedParameters { revision: "gpt-2".to_string(), ..Default::default() }), )?; let encoding = tokenizer.encode("Hey there dear friend!", false)?; assert_eq!( encoding.get_tokens(), &["Hey", "Ġthere", "Ġdear", "Ġfriend", "!"] ); Ok(()) } #[test] fn test_from_pretrained_invalid_model() { let tokenizer = Tokenizer::from_pretrained("docs?", None); assert!(tokenizer.is_err()); } #[test] fn test_from_pretrained_invalid_revision() { let tokenizer = Tokenizer::from_pretrained( "bert-base-cased", Some(FromPretrainedParameters { revision: "gpt?".to_string(), ..Default::default() }), ); assert!(tokenizer.is_err()); }
tokenizers/tokenizers/tests/from_pretrained.rs/0
{ "file_path": "tokenizers/tokenizers/tests/from_pretrained.rs", "repo_id": "tokenizers", "token_count": 683 }
// See the Electron documentation for details on how to use preload scripts: // https://www.electronjs.org/docs/latest/tutorial/process-model#preload-scripts const { contextBridge, ipcRenderer } = require('electron'); // Here, we use the `contextBridge` API to expose a custom API to the renderer process. // This API allows the renderer process to invoke the `transformers:run` event in the main process. contextBridge.exposeInMainWorld('electronAPI', { run: (text) => ipcRenderer.invoke('transformers:run', text) });
transformers.js/examples/electron/src/preload.js/0
{ "file_path": "transformers.js/examples/electron/src/preload.js", "repo_id": "transformers.js", "token_count": 153 }
// Adapted from https://github.com/xenova/transformers.js/blob/c367f9d68b809bbbf81049c808bf6d219d761d23/src/utils/hub.js#L330 export async function getCachedFile(url) { let cache; try { cache = await caches.open('semantic-audio-search'); const cachedResponse = await cache.match(url); if (cachedResponse) { return await cachedResponse.arrayBuffer(); } } catch (e) { console.warn('Unable to open cache', e); } // No cache, or cache failed to open. Fetch the file. const response = await fetch(url); const buffer = await response.arrayBuffer(); if (cache) { try { // NOTE: We use `new Response(buffer, ...)` instead of `response.clone()` to handle LFS files await cache.put(url, new Response(buffer, { headers: response.headers, })); } catch (e) { console.warn('Unable to cache file', e); } } return buffer; } export async function getCachedJSON(url) { let buffer = await getCachedFile(url); let decoder = new TextDecoder('utf-8'); let jsonData = decoder.decode(buffer); return JSON.parse(jsonData); }
transformers.js/examples/semantic-audio-search/utils.js/0
{ "file_path": "transformers.js/examples/semantic-audio-search/utils.js", "repo_id": "transformers.js", "token_count": 502 }
@tailwind base; @tailwind components; @tailwind utilities; :root { --foreground-rgb: 255, 255, 255; --background-start-rgb: 0, 0, 0; --background-end-rgb: 0, 0, 0; } body { color: rgb(var(--foreground-rgb)); background: linear-gradient( to bottom, transparent, rgb(var(--background-end-rgb)) ) rgb(var(--background-start-rgb)); }
transformers.js/examples/semantic-image-search-client/src/app/globals.css/0
{ "file_path": "transformers.js/examples/semantic-image-search-client/src/app/globals.css", "repo_id": "transformers.js", "token_count": 157 }
html, body { font-family: Arial, Helvetica, sans-serif; } .container { margin: 40px auto; width: max(50vw, 400px); display: flex; flex-direction: column; align-items: center; } .custom-file-upload { display: flex; align-items: center; cursor: pointer; gap: 10px; border: 2px solid black; padding: 8px 16px; cursor: pointer; border-radius: 6px; } #file-upload { display: none; } .upload-icon { width: 30px; } #image-container { width: 100%; margin-top: 20px; position: relative; } #image-container>img { width: 100%; } .bounding-box { position: absolute; box-sizing: border-box; border-width: 2px; border-style: solid; } .bounding-box-label { color: white; position: absolute; font-size: 12px; margin-top: -16px; margin-left: -2px; padding: 1px; }
transformers.js/examples/vanilla-js/style.css/0
{ "file_path": "transformers.js/examples/vanilla-js/style.css", "repo_id": "transformers.js", "token_count": 389 }
* { box-sizing: border-box; padding: 0; margin: 0; font-family: sans-serif; } html, body { height: 100%; } body { padding: 16px 32px; } body, #container { display: flex; flex-direction: column; justify-content: center; align-items: center; } #controls { display: flex; padding: 1rem; gap: 1rem; } #controls>div { text-align: center; } h1, h3 { text-align: center; } h3 { margin-top: 0.5rem; } #container { position: relative; width: 720px; height: 405px; max-width: 100%; max-height: 100%; border: 2px dashed #D1D5DB; border-radius: 0.75rem; overflow: hidden; margin-top: 1rem; background-size: 100% 100%; background-position: center; background-repeat: no-repeat; } #status { min-height: 16px; margin: 8px 0; } video { width: 100%; height: 100%; } input[type="text"] { padding: 0.25rem 0.5rem; border: 1px solid #D1D5DB; border-radius: 0.25rem; margin-top: 2px; } input[type="range"] { margin-top: 6px; } #overlay { position: absolute; top: 0; left: 0; background-color: rgba(255, 255, 255, 0.9); font-size: 1.25rem; border-radius: 2px; } #overlay:not(:empty) { padding: 0.5rem; }
transformers.js/examples/webgpu-clip/style.css/0
{ "file_path": "transformers.js/examples/webgpu-clip/style.css", "repo_id": "transformers.js", "token_count": 510 }
import './style.css'; import { AutoModel, AutoProcessor, RawImage } from '@xenova/transformers'; async function hasFp16() { try { const adapter = await navigator.gpu.requestAdapter() return adapter.features.has('shader-f16') } catch (e) { return false } } // Reference the elements that we will need const status = document.getElementById('status'); const canvas = document.createElement('canvas'); const outputCanvas = document.getElementById('output-canvas'); const video = document.getElementById('video'); const sizeSlider = document.getElementById('size'); const sizeLabel = document.getElementById('size-value'); const scaleSlider = document.getElementById('scale'); const scaleLabel = document.getElementById('scale-value'); function setStreamSize(width, height) { video.width = outputCanvas.width = canvas.width = Math.round(width); video.height = outputCanvas.height = canvas.height = Math.round(height); } status.textContent = 'Loading model...'; // Load model and processor const model_id = 'onnx-community/depth-anything-v2-small'; let model; try { model = await AutoModel.from_pretrained(model_id, { device: 'webgpu', // Use fp16 if available, otherwise use fp32 dtype: (await hasFp16()) ? 'fp16' : 'fp32', }); } catch (err) { status.textContent = err.message; alert(err.message) throw err; } const processor = await AutoProcessor.from_pretrained(model_id); // Set up controls let size = 504; processor.feature_extractor.size = { width: size, height: size }; sizeSlider.addEventListener('input', () => { size = Number(sizeSlider.value); processor.feature_extractor.size = { width: size, height: size }; sizeLabel.textContent = size; }); sizeSlider.disabled = false; let scale = 0.4; scaleSlider.addEventListener('input', () => { scale = Number(scaleSlider.value); setStreamSize(video.videoWidth * scale, video.videoHeight * scale); scaleLabel.textContent = scale; }); scaleSlider.disabled = false; status.textContent = 'Ready'; let isProcessing = false; let previousTime; const context = canvas.getContext('2d', { willReadFrequently: true }); const outputContext = outputCanvas.getContext('2d', { willReadFrequently: true }); function updateCanvas() { const { width, height } = canvas; if (!isProcessing) { isProcessing = true; (async function () { // Read the current frame from the video context.drawImage(video, 0, 0, width, height); const currentFrame = context.getImageData(0, 0, width, height); const image = new RawImage(currentFrame.data, width, height, 4); // Pre-process image const inputs = await processor(image); // Predict depth map const { predicted_depth } = await model(inputs); const data = predicted_depth.data; const [bs, oh, ow] = predicted_depth.dims; // Normalize the depth map let min = Infinity; let max = -Infinity; outputCanvas.width = ow; outputCanvas.height = oh; for (let i = 0; i < data.length; ++i) { const v = data[i]; if (v < min) min = v; if (v > max) max = v; } const range = max - min; const imageData = new Uint8ClampedArray(4 * data.length); for (let i = 0; i < data.length; ++i) { const offset = 4 * i; imageData[offset] = 255; // Set base color to red // Set alpha to normalized depth value imageData[offset + 3] = 255 * (1 - (data[i] - min) / range); } const outPixelData = new ImageData(imageData, ow, oh); outputContext.putImageData(outPixelData, 0, 0); if (previousTime !== undefined) { const fps = 1000 / (performance.now() - previousTime); status.textContent = `FPS: ${fps.toFixed(2)}`; } previousTime = performance.now(); isProcessing = false; })(); } 
window.requestAnimationFrame(updateCanvas); } // Start the video stream navigator.mediaDevices.getUserMedia( { video: { width: 720, height: 720 } }, // Ask for square video ).then((stream) => { // Set up the video and canvas elements. video.srcObject = stream; video.play(); const videoTrack = stream.getVideoTracks()[0]; const { width, height } = videoTrack.getSettings(); setStreamSize(width * scale, height * scale); // Start the animation loop setTimeout(updateCanvas, 50); }).catch((error) => { alert(error); });
transformers.js/examples/webgpu-video-depth-estimation/main.js/0
{ "file_path": "transformers.js/examples/webgpu-video-depth-estimation/main.js", "repo_id": "transformers.js", "token_count": 1857 }