code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
|
Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
classes are imported or available.
|
infer_framework
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
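A minimal usage sketch for the record above; it assumes `infer_framework` is importable from `transformers.utils.generic` (the path listed in this row) and that torch is installed. `DummyModel` is purely illustrative.

```python
# Hedged sketch: infer_framework walks the MRO, so any class inheriting from a
# torch module (or a class named PreTrainedModel) resolves to "pt".
import torch
from transformers.utils.generic import infer_framework


class DummyModel(torch.nn.Module):  # illustrative stand-in, not a real transformers class
    pass


print(infer_framework(DummyModel))  # expected: "pt"
```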
def torch_int(x):
    """
    Casts an input to a torch int64 tensor if we are in a tracing context, otherwise to a Python int.
    """
    if not is_torch_available():
        return int(x)

    import torch

    return x.to(torch.int64) if torch.jit.is_tracing() and isinstance(x, torch.Tensor) else int(x)
|
Casts an input to a torch int64 tensor if we are in a tracing context, otherwise to a Python int.
|
torch_int
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
def torch_float(x):
    """
    Casts an input to a torch float32 tensor if we are in a tracing context, otherwise to a Python float.
    """
    if not is_torch_available():
        return float(x)

    import torch

    return x.to(torch.float32) if torch.jit.is_tracing() and isinstance(x, torch.Tensor) else float(x)
|
Casts an input to a torch float32 tensor if we are in a tracing context, otherwise to a Python float.
|
torch_float
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
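A short sketch covering both casting helpers above, assuming torch is installed and we are not inside `torch.jit.trace`, so the plain-Python fallback path runs.

```python
# Hedged sketch: outside a tracing context both helpers fall back to plain Python numbers.
import torch
from transformers.utils.generic import torch_float, torch_int

print(torch_int(torch.tensor(3)))      # 3 -> plain int, since torch.jit.is_tracing() is False
print(torch_float(torch.tensor(2.5)))  # 2.5 -> plain float (per the docstring's float fallback)
```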
def filter_out_non_signature_kwargs(extra: Optional[list] = None):
"""
Decorator to filter out named arguments that are not in the function signature.
This decorator ensures that only the keyword arguments that match the function's signature, or are specified in the
`extra` list, are passed to the function. Any additional keyword arguments are filtered out and a warning is issued.
Parameters:
extra (`Optional[list]`, *optional*):
A list of extra keyword argument names that are allowed even if they are not in the function's signature.
Returns:
Callable:
A decorator that wraps the function and filters out invalid keyword arguments.
Example usage:
```python
@filter_out_non_signature_kwargs(extra=["allowed_extra_arg"])
def my_function(arg1, arg2, **kwargs):
print(arg1, arg2, kwargs)
my_function(arg1=1, arg2=2, allowed_extra_arg=3, invalid_arg=4)
# This will print: 1 2 {"allowed_extra_arg": 3}
# And issue a warning: "The following named arguments are not valid for `my_function` and were ignored: 'invalid_arg'"
```
"""
extra = extra or []
extra_params_to_pass = set(extra)
def decorator(func):
sig = inspect.signature(func)
function_named_args = set(sig.parameters.keys())
valid_kwargs_to_pass = function_named_args.union(extra_params_to_pass)
# Required for better warning message
is_instance_method = "self" in function_named_args
is_class_method = "cls" in function_named_args
# Mark function as decorated
func._filter_out_non_signature_kwargs = True
@wraps(func)
def wrapper(*args, **kwargs):
valid_kwargs = {}
invalid_kwargs = {}
for k, v in kwargs.items():
if k in valid_kwargs_to_pass:
valid_kwargs[k] = v
else:
invalid_kwargs[k] = v
if invalid_kwargs:
invalid_kwargs_names = [f"'{k}'" for k in invalid_kwargs.keys()]
invalid_kwargs_names = ", ".join(invalid_kwargs_names)
# Get the class name for better warning message
if is_instance_method:
cls_prefix = args[0].__class__.__name__ + "."
elif is_class_method:
cls_prefix = args[0].__name__ + "."
else:
cls_prefix = ""
warnings.warn(
f"The following named arguments are not valid for `{cls_prefix}{func.__name__}`"
f" and were ignored: {invalid_kwargs_names}",
UserWarning,
stacklevel=2,
)
return func(*args, **valid_kwargs)
return wrapper
return decorator
|
Decorator to filter out named arguments that are not in the function signature.
This decorator ensures that only the keyword arguments that match the function's signature, or are specified in the
`extra` list, are passed to the function. Any additional keyword arguments are filtered out and a warning is issued.
Parameters:
extra (`Optional[list]`, *optional*):
A list of extra keyword argument names that are allowed even if they are not in the function's signature.
Returns:
Callable:
A decorator that wraps the function and filters out invalid keyword arguments.
Example usage:
```python
@filter_out_non_signature_kwargs(extra=["allowed_extra_arg"])
def my_function(arg1, arg2, **kwargs):
print(arg1, arg2, kwargs)
my_function(arg1=1, arg2=2, allowed_extra_arg=3, invalid_arg=4)
# This will print: 1 2 {"allowed_extra_arg": 3}
# And issue a warning: "The following named arguments are not valid for `my_function` and were ignored: 'invalid_arg'"
```
|
filter_out_non_signature_kwargs
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
def is_timm_local_checkpoint(pretrained_model_path: str) -> bool:
    """
    Checks whether a checkpoint is a timm model checkpoint.
    """
    if pretrained_model_path is None:
        return False

    # in case it's Path, not str
    pretrained_model_path = str(pretrained_model_path)

    is_file = os.path.isfile(pretrained_model_path)
    is_dir = os.path.isdir(pretrained_model_path)

    # pretrained_model_path is a file
    if is_file and pretrained_model_path.endswith(".json"):
        with open(pretrained_model_path) as f:
            config_dict = json.load(f)
        return is_timm_config_dict(config_dict)

    # pretrained_model_path is a directory with a config.json
    if is_dir and os.path.exists(os.path.join(pretrained_model_path, "config.json")):
        with open(os.path.join(pretrained_model_path, "config.json")) as f:
            config_dict = json.load(f)
        return is_timm_config_dict(config_dict)

    return False
|
Checks whether a checkpoint is a timm model checkpoint.
|
is_timm_local_checkpoint
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
def set_attribute_for_modules(module: "torch.nn.Module", key: str, value: Any):
    """
    Set a value to a module and all submodules.
    """
    setattr(module, key, value)
    for submodule in module.children():
        set_attribute_for_modules(submodule, key, value)
|
Set a value to a module and all submodules.
|
set_attribute_for_modules
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
def del_attribute_from_modules(module: "torch.nn.Module", key: str):
    """
    Delete a value from a module and all submodules.
    """
    # the attribute may already have been removed if the submodule is shared, e.g. an activation function
    if hasattr(module, key):
        delattr(module, key)

    for submodule in module.children():
        del_attribute_from_modules(submodule, key)
|
Delete a value from a module and all submodules.
|
del_attribute_from_modules
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
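A quick sketch exercising the recursive attribute helpers from the two records above (torch assumed installed; the attribute name is arbitrary).

```python
# Hedged sketch: tag every submodule recursively, then remove the tag again.
import torch
from transformers.utils.generic import del_attribute_from_modules, set_attribute_for_modules

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
set_attribute_for_modules(model, "_tagged", True)
print(all(getattr(m, "_tagged", False) for m in model.modules()))  # True

del_attribute_from_modules(model, "_tagged")
print(any(hasattr(m, "_tagged") for m in model.modules()))  # False
```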
def can_return_tuple(func):
    """
    Decorator to wrap a model method, calling `output.to_tuple()` if `return_dict=False` is passed as a kwarg or
    `use_return_dict=False` is set in the config.

    Note:
        `output.to_tuple()` converts the output to a tuple, skipping all `None` values.
    """

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        is_requested_to_return_tuple = kwargs.pop("return_dict", True) is False
        is_configured_to_return_tuple = self.config.use_return_dict is False if hasattr(self, "config") else False

        # The following converts the output to a tuple ONLY on the top-level forward call,
        # while internal modules of the model keep returning Output objects so that
        # name-based attribute access still works in modeling code.
        # If we are on the top-level module, tuple conversion is turned off for all
        # underlying calls.
        is_top_level_module = getattr(self, "_is_top_level_module", True)
        if is_configured_to_return_tuple and is_top_level_module:
            set_attribute_for_modules(self, "_is_top_level_module", False)

        try:
            output = func(self, *args, **kwargs)
            if is_requested_to_return_tuple or (is_configured_to_return_tuple and is_top_level_module):
                output = output.to_tuple()
        finally:
            # Remove the flag after the model forward call is finished.
            if is_configured_to_return_tuple and is_top_level_module:
                del_attribute_from_modules(self, "_is_top_level_module")

        return output

    return wrapper
|
Decorator to wrap a model method, calling `output.to_tuple()` if `return_dict=False` is passed as a kwarg or
`use_return_dict=False` is set in the config.
Note:
`output.to_tuple()` converts the output to a tuple, skipping all `None` values.
|
can_return_tuple
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
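A self-contained sketch of the decorator's behavior using stand-in classes; only `can_return_tuple` comes from transformers, while `TinyOutput`, `TinyModel`, and the `SimpleNamespace` config are illustrative.

```python
from types import SimpleNamespace

from transformers.utils.generic import can_return_tuple


class TinyOutput:
    def __init__(self, a, b):
        self.a, self.b = a, b

    def to_tuple(self):  # mimics ModelOutput.to_tuple(): drop None values
        return tuple(v for v in (self.a, self.b) if v is not None)


class TinyModel:
    config = SimpleNamespace(use_return_dict=True)

    @can_return_tuple
    def forward(self, x):
        return TinyOutput(x, None)


model = TinyModel()
print(type(model.forward(1)).__name__)      # TinyOutput (structured output kept)
print(model.forward(1, return_dict=False))  # (1,) -> converted via to_tuple()
```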
def list_repo_templates(
    repo_id: str,
    *,
    local_files_only: bool,
    revision: Optional[str] = None,
    cache_dir: Optional[str] = None,
) -> list[str]:
    """List template files from a repo.

    A template is a jinja file located under the `additional_chat_templates/` folder.
    If working in offline mode or if the internet is down, the method will list jinja templates from the local
    cache, if any.
    """
    if not local_files_only:
        try:
            return [
                entry.path.removeprefix(f"{CHAT_TEMPLATE_DIR}/")
                for entry in list_repo_tree(
                    repo_id=repo_id, revision=revision, path_in_repo=CHAT_TEMPLATE_DIR, recursive=False
                )
                if entry.path.endswith(".jinja")
            ]
        except (GatedRepoError, RepositoryNotFoundError, RevisionNotFoundError):
            raise  # valid errors => do not catch
        except (ConnectionError, HTTPError):
            pass  # offline mode, internet down, etc. => try local files

    # check local files
    try:
        snapshot_dir = snapshot_download(
            repo_id=repo_id, revision=revision, cache_dir=cache_dir, local_files_only=True
        )
    except LocalEntryNotFoundError:  # No local repo means no local files
        return []

    templates_dir = Path(snapshot_dir, CHAT_TEMPLATE_DIR)
    if not templates_dir.is_dir():
        return []

    return [entry.stem for entry in templates_dir.iterdir() if entry.is_file() and entry.name.endswith(".jinja")]
|
List template files from a repo.
A template is a jinja file located under the `additional_chat_templates/` folder.
If working in offline mode or if the internet is down, the method will list jinja templates from the local cache, if any.
|
list_repo_templates
|
python
|
huggingface/transformers
|
src/transformers/utils/hub.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/hub.py
|
Apache-2.0
|
def http_user_agent(user_agent: Union[dict, str, None] = None) -> str:
    """
    Formats a user-agent string with basic info about a request.
    """
    ua = f"transformers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_tf_available():
        ua += f"; tensorflow/{_tf_version}"
    if constants.HF_HUB_DISABLE_TELEMETRY:
        return ua + "; telemetry/off"
    if is_training_run_on_sagemaker():
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in define_sagemaker_information().items())
    # CI will set this value to True
    if os.environ.get("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
|
Formats a user-agent string with basic info about a request.
|
http_user_agent
|
python
|
huggingface/transformers
|
src/transformers/utils/hub.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/hub.py
|
Apache-2.0
|
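A usage sketch for the record above; the exact output depends on the installed versions, so the comment only shows the general shape.

```python
# Hedged sketch: extra context is appended to the base UA string as "key/value" pairs.
from transformers.utils.hub import http_user_agent

print(http_user_agent({"file_type": "model", "from_auto_class": False}))
# e.g. "transformers/<version>; python/<version>; session_id/<id>; torch/<version>; file_type/model; from_auto_class/False"
```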
def download_url(url, proxies=None):
    """
    Downloads a given url to a temporary file. This function is not safe to use in multiple processes. Its only use
    is for the deprecated behavior of allowing config/model downloads from a single url instead of using the Hub.

    Args:
        url (`str`): The url of the file to download.
        proxies (`Dict[str, str]`, *optional*):
            A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
            'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.

    Returns:
        `str`: The location of the temporary file where the url was downloaded.
    """
    warnings.warn(
        f"Using `from_pretrained` with the url of a file (here {url}) is deprecated and won't be possible anymore in"
        " v5 of Transformers. You should host your file on the Hub (hf.co) instead and use the repository ID. Note"
        " that this is not compatible with the caching system (your file will be downloaded at each execution) or"
        " multiple processes (each process will download the file in a different temporary file).",
        FutureWarning,
    )
    tmp_fd, tmp_file = tempfile.mkstemp()
    with os.fdopen(tmp_fd, "wb") as f:
        http_get(url, f, proxies=proxies)
    return tmp_file
|
Downloads a given url to a temporary file. This function is not safe to use in multiple processes. Its only use is
for the deprecated behavior of allowing config/model downloads from a single url instead of using the Hub.
Args:
url (`str`): The url of the file to download.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
Returns:
`str`: The location of the temporary file where the url was downloaded.
|
download_url
|
python
|
huggingface/transformers
|
src/transformers/utils/hub.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/hub.py
|
Apache-2.0
|
def has_file(
path_or_repo: Union[str, os.PathLike],
filename: str,
revision: Optional[str] = None,
proxies: Optional[dict[str, str]] = None,
token: Optional[Union[bool, str]] = None,
*,
local_files_only: bool = False,
cache_dir: Union[str, Path, None] = None,
repo_type: Optional[str] = None,
**deprecated_kwargs,
):
"""
Checks if a repo contains a given file without downloading it. Works for remote repos and local folders.
If offline mode is enabled, checks if the file exists in the cache.
<Tip warning={false}>
This function will raise an error if the repository `path_or_repo` is not valid or if `revision` does not exist for
this repo, but will return False for regular connection errors.
</Tip>
"""
use_auth_token = deprecated_kwargs.pop("use_auth_token", None)
if use_auth_token is not None:
warnings.warn(
"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
FutureWarning,
)
if token is not None:
raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
token = use_auth_token
# If path to local directory, check if the file exists
if os.path.isdir(path_or_repo):
return os.path.isfile(os.path.join(path_or_repo, filename))
# Else it's a repo => let's check if the file exists in local cache or on the Hub
# Check if file exists in cache
# This information might be outdated so it's best to also make a HEAD call (if allowed).
cached_path = try_to_load_from_cache(
repo_id=path_or_repo,
filename=filename,
revision=revision,
repo_type=repo_type,
cache_dir=cache_dir,
)
has_file_in_cache = isinstance(cached_path, str)
# If local_files_only, don't try the HEAD call
if local_files_only:
return has_file_in_cache
# Check if the file exists
try:
response = get_session().head(
hf_hub_url(path_or_repo, filename=filename, revision=revision, repo_type=repo_type),
headers=build_hf_headers(token=token, user_agent=http_user_agent()),
allow_redirects=False,
proxies=proxies,
timeout=10,
)
except (requests.exceptions.SSLError, requests.exceptions.ProxyError):
# Actually raise for those subclasses of ConnectionError
raise
except (
requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
OfflineModeIsEnabled,
):
return has_file_in_cache
try:
hf_raise_for_status(response)
return True
except GatedRepoError as e:
logger.error(e)
raise OSError(
f"{path_or_repo} is a gated repository. Make sure to request access at "
f"https://huggingface.co/{path_or_repo} and pass a token having permission to this repo either by "
"logging in with `huggingface-cli login` or by passing `token=<your_token>`."
) from e
except RepositoryNotFoundError as e:
logger.error(e)
raise OSError(f"{path_or_repo} is not a local folder or a valid repository name on 'https://hf.co'.") from e
except RevisionNotFoundError as e:
logger.error(e)
raise OSError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this "
f"model name. Check the model page at 'https://huggingface.co/{path_or_repo}' for available revisions."
) from e
except EntryNotFoundError:
return False # File does not exist
except requests.HTTPError:
# Any authentication/authorization error will be caught here => default to cache
return has_file_in_cache
|
Checks if a repo contains a given file without downloading it. Works for remote repos and local folders.
If offline mode is enabled, checks if the file exists in the cache.
<Tip warning={false}>
This function will raise an error if the repository `path_or_repo` is not valid or if `revision` does not exist for
this repo, but will return False for regular connection errors.
</Tip>
|
has_file
|
python
|
huggingface/transformers
|
src/transformers/utils/hub.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/hub.py
|
Apache-2.0
|
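A small sketch of `has_file` for the record above (network access assumed; `bert-base-uncased` is used only as a well-known public repo).

```python
from transformers.utils.hub import has_file

# Hedged sketch: a HEAD request against the Hub, no file is downloaded.
print(has_file("bert-base-uncased", "config.json"))       # True (when the Hub is reachable)
print(has_file("bert-base-uncased", "no_such_file.bin"))  # False (EntryNotFoundError is swallowed)
```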
def _create_repo(
    self,
    repo_id: str,
    private: Optional[bool] = None,
    token: Optional[Union[bool, str]] = None,
    repo_url: Optional[str] = None,
    organization: Optional[str] = None,
) -> str:
    """
    Creates the repo if needed, cleans up `repo_id` from the deprecated kwargs `repo_url` and `organization`, and
    retrieves the token.
    """
    if repo_url is not None:
        warnings.warn(
            "The `repo_url` argument is deprecated and will be removed in v5 of Transformers. Use `repo_id` "
            "instead."
        )
        if repo_id is not None:
            raise ValueError(
                "`repo_id` and `repo_url` are both specified. Please set only the argument `repo_id`."
            )
        repo_id = repo_url.replace(f"{HUGGINGFACE_CO_RESOLVE_ENDPOINT}/", "")
    if organization is not None:
        warnings.warn(
            "The `organization` argument is deprecated and will be removed in v5 of Transformers. Set your "
            "organization directly in the `repo_id` passed instead (`repo_id={organization}/{model_id}`)."
        )
        if not repo_id.startswith(organization):
            if "/" in repo_id:
                repo_id = repo_id.split("/")[-1]
            repo_id = f"{organization}/{repo_id}"

    url = create_repo(repo_id=repo_id, token=token, private=private, exist_ok=True)
    return url.repo_id
|
Creates the repo if needed, cleans up `repo_id` from the deprecated kwargs `repo_url` and `organization`, and
retrieves the token.
|
_create_repo
|
python
|
huggingface/transformers
|
src/transformers/utils/hub.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/hub.py
|
Apache-2.0
|
def _upload_modified_files(
self,
working_dir: Union[str, os.PathLike],
repo_id: str,
files_timestamps: dict[str, float],
commit_message: Optional[str] = None,
token: Optional[Union[bool, str]] = None,
create_pr: bool = False,
revision: Optional[str] = None,
commit_description: Optional[str] = None,
):
"""
Uploads all modified files in `working_dir` to `repo_id`, based on `files_timestamps`.
"""
if commit_message is None:
if "Model" in self.__class__.__name__:
commit_message = "Upload model"
elif "Config" in self.__class__.__name__:
commit_message = "Upload config"
elif "Tokenizer" in self.__class__.__name__:
commit_message = "Upload tokenizer"
elif "FeatureExtractor" in self.__class__.__name__:
commit_message = "Upload feature extractor"
elif "Processor" in self.__class__.__name__:
commit_message = "Upload processor"
else:
commit_message = f"Upload {self.__class__.__name__}"
modified_files = [
f
for f in os.listdir(working_dir)
if f not in files_timestamps or os.path.getmtime(os.path.join(working_dir, f)) > files_timestamps[f]
]
# filter for actual files + folders at the root level
modified_files = [
f
for f in modified_files
if os.path.isfile(os.path.join(working_dir, f)) or os.path.isdir(os.path.join(working_dir, f))
]
operations = []
# upload standalone files
for file in modified_files:
if os.path.isdir(os.path.join(working_dir, file)):
# go over individual files of folder
for f in os.listdir(os.path.join(working_dir, file)):
operations.append(
CommitOperationAdd(
path_or_fileobj=os.path.join(working_dir, file, f), path_in_repo=os.path.join(file, f)
)
)
else:
operations.append(
CommitOperationAdd(path_or_fileobj=os.path.join(working_dir, file), path_in_repo=file)
)
if revision is not None and not revision.startswith("refs/pr"):
try:
create_branch(repo_id=repo_id, branch=revision, token=token, exist_ok=True)
except HfHubHTTPError as e:
if e.response.status_code == 403 and create_pr:
# If we are creating a PR on a repo we don't have access to, we can't create the branch.
# so let's assume the branch already exists. If it's not the case, an error will be raised when
# calling `create_commit` below.
pass
else:
raise
logger.info(f"Uploading the following files to {repo_id}: {','.join(modified_files)}")
return create_commit(
repo_id=repo_id,
operations=operations,
commit_message=commit_message,
commit_description=commit_description,
token=token,
create_pr=create_pr,
revision=revision,
)
|
Uploads all modified files in `working_dir` to `repo_id`, based on `files_timestamps`.
|
_upload_modified_files
|
python
|
huggingface/transformers
|
src/transformers/utils/hub.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/hub.py
|
Apache-2.0
|
def send_example_telemetry(example_name, *example_args, framework="pytorch"):
    """
    Sends telemetry that helps track example usage.

    Args:
        example_name (`str`): The name of the example.
        *example_args (dataclasses or `argparse.ArgumentParser`): The arguments to the script. This function will
            only try to extract the model and dataset name from those. Nothing else is tracked.
        framework (`str`, *optional*, defaults to `"pytorch"`): The framework for the example.
    """
    if is_offline_mode():
        return

    data = {"example": example_name, "framework": framework}
    for args in example_args:
        args_as_dict = {k: v for k, v in args.__dict__.items() if not k.startswith("_") and v is not None}
        if "model_name_or_path" in args_as_dict:
            model_name = args_as_dict["model_name_or_path"]
            # Filter out local paths
            if not os.path.isdir(model_name):
                data["model_name"] = args_as_dict["model_name_or_path"]
        if "dataset_name" in args_as_dict:
            data["dataset_name"] = args_as_dict["dataset_name"]
        elif "task_name" in args_as_dict:
            # Extract script name from the example_name
            script_name = example_name.replace("tf_", "").replace("flax_", "").replace("run_", "")
            script_name = script_name.replace("_no_trainer", "")
            data["dataset_name"] = f"{script_name}-{args_as_dict['task_name']}"

    # Send telemetry in the background
    send_telemetry(
        topic="examples", library_name="transformers", library_version=__version__, user_agent=http_user_agent(data)
    )
|
Sends telemetry that helps track example usage.
Args:
example_name (`str`): The name of the example.
*example_args (dataclasses or `argparse.ArgumentParser`): The arguments to the script. This function will only
try to extract the model and dataset name from those. Nothing else is tracked.
framework (`str`, *optional*, defaults to `"pytorch"`): The framework for the example.
|
send_example_telemetry
|
python
|
huggingface/transformers
|
src/transformers/utils/hub.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/hub.py
|
Apache-2.0
|
def convert_file_size_to_int(size: Union[int, str]):
    """
    Converts a size expressed as a string with digits and a unit (like `"5MB"`) to an integer (in bytes).

    Args:
        size (`int` or `str`): The size to convert. Will be directly returned if an `int`.

    Example:
    ```py
    >>> convert_file_size_to_int("1MiB")
    1048576
    ```
    """
    if isinstance(size, int):
        return size
    if size.upper().endswith("GIB"):
        return int(size[:-3]) * (2**30)
    if size.upper().endswith("MIB"):
        return int(size[:-3]) * (2**20)
    if size.upper().endswith("KIB"):
        return int(size[:-3]) * (2**10)
    if size.upper().endswith("GB"):
        int_size = int(size[:-2]) * (10**9)
        return int_size // 8 if size.endswith("b") else int_size
    if size.upper().endswith("MB"):
        int_size = int(size[:-2]) * (10**6)
        return int_size // 8 if size.endswith("b") else int_size
    if size.upper().endswith("KB"):
        int_size = int(size[:-2]) * (10**3)
        return int_size // 8 if size.endswith("b") else int_size
    raise ValueError("`size` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.")
|
Converts a size expressed as a string with digits and a unit (like `"5MB"`) to an integer (in bytes).
Args:
size (`int` or `str`): The size to convert. Will be directly returned if an `int`.
Example:
```py
>>> convert_file_size_to_int("1MiB")
1048576
```
|
convert_file_size_to_int
|
python
|
huggingface/transformers
|
src/transformers/utils/hub.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/hub.py
|
Apache-2.0
|
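A few extra worked values for the record above, showing how decimal units, binary (`*iB`) units, and the lowercase bit suffix are handled.

```python
from transformers.utils.hub import convert_file_size_to_int

print(convert_file_size_to_int("5GB"))   # 5_000_000_000  (decimal, powers of 10)
print(convert_file_size_to_int("5GiB"))  # 5_368_709_120  (binary, powers of 2)
print(convert_file_size_to_int("5Gb"))   # 625_000_000    (trailing lowercase "b" -> bits, divided by 8)
```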
def get_checkpoint_shard_files(
pretrained_model_name_or_path,
index_filename,
cache_dir=None,
force_download=False,
proxies=None,
resume_download=None,
local_files_only=False,
token=None,
user_agent=None,
revision=None,
subfolder="",
_commit_hash=None,
**deprecated_kwargs,
):
"""
For a given model:
- downloads and caches all the shards of a sharded checkpoint if `pretrained_model_name_or_path` is a model ID on the
Hub
- returns the list of paths to all the shards, as well as some metadata.
For the description of each arg, see [`PreTrainedModel.from_pretrained`]. `index_filename` is the full path to the
index (downloaded and cached if `pretrained_model_name_or_path` is a model ID on the Hub).
"""
import json
use_auth_token = deprecated_kwargs.pop("use_auth_token", None)
if use_auth_token is not None:
warnings.warn(
"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
FutureWarning,
)
if token is not None:
raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
token = use_auth_token
if not os.path.isfile(index_filename):
raise ValueError(f"Can't find a checkpoint index ({index_filename}) in {pretrained_model_name_or_path}.")
with open(index_filename) as f:
index = json.loads(f.read())
shard_filenames = sorted(set(index["weight_map"].values()))
sharded_metadata = index["metadata"]
sharded_metadata["all_checkpoint_keys"] = list(index["weight_map"].keys())
sharded_metadata["weight_map"] = index["weight_map"].copy()
# First, let's deal with local folder.
if os.path.isdir(pretrained_model_name_or_path):
shard_filenames = [os.path.join(pretrained_model_name_or_path, subfolder, f) for f in shard_filenames]
return shard_filenames, sharded_metadata
# At this stage pretrained_model_name_or_path is a model identifier on the Hub. Try to get everything from cache,
# or download the files
cached_filenames = cached_files(
pretrained_model_name_or_path,
shard_filenames,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
token=token,
user_agent=user_agent,
revision=revision,
subfolder=subfolder,
_commit_hash=_commit_hash,
)
return cached_filenames, sharded_metadata
|
For a given model:
- downloads and caches all the shards of a sharded checkpoint if `pretrained_model_name_or_path` is a model ID on the
Hub
- returns the list of paths to all the shards, as well as some metadata.
For the description of each arg, see [`PreTrainedModel.from_pretrained`]. `index_filename` is the full path to the
index (downloaded and cached if `pretrained_model_name_or_path` is a model ID on the Hub).
|
get_checkpoint_shard_files
|
python
|
huggingface/transformers
|
src/transformers/utils/hub.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/hub.py
|
Apache-2.0
|
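A hedged sketch of the local-folder path of the function above; the index contents and shard file names are made up purely to illustrate the expected `*.index.json` layout.

```python
import json
import os
import tempfile

from transformers.utils.hub import get_checkpoint_shard_files

index = {
    "metadata": {"total_size": 123},
    "weight_map": {
        "embeddings.weight": "model-00001-of-00002.safetensors",  # illustrative shard names
        "lm_head.weight": "model-00002-of-00002.safetensors",
    },
}
with tempfile.TemporaryDirectory() as tmp:
    index_file = os.path.join(tmp, "model.safetensors.index.json")
    with open(index_file, "w") as f:
        json.dump(index, f)
    # For a local folder, the function only joins the shard names onto the folder path.
    shards, metadata = get_checkpoint_shard_files(tmp, index_file)
    print(shards)                           # two paths inside the temporary folder
    print(metadata["all_checkpoint_keys"])  # ['embeddings.weight', 'lm_head.weight']
```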
def is_torch_deterministic():
    """
    Checks whether pytorch uses deterministic algorithms by checking if torch.get_deterministic_debug_mode() returns 1 or 2.
    """
    if is_torch_available():
        import torch

        if torch.get_deterministic_debug_mode() == 0:
            return False
        else:
            return True

    return False
|
Checks whether pytorch uses deterministic algorithms by checking if torch.get_deterministic_debug_mode() returns 1 or 2.
|
is_torch_deterministic
|
python
|
huggingface/transformers
|
src/transformers/utils/import_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/import_utils.py
|
Apache-2.0
|
def is_torch_xla_available(check_is_tpu=False, check_is_gpu=False):
    """
    Check if `torch_xla` is available. To train a native pytorch job in an environment with torch xla installed, set
    the `USE_TORCH_XLA` environment variable to `false`.
    """
    assert not (check_is_tpu and check_is_gpu), "The check_is_tpu and check_is_gpu cannot both be true."

    if not _torch_xla_available:
        return False

    import torch_xla

    if check_is_gpu:
        return torch_xla.runtime.device_type() in ["GPU", "CUDA"]
    elif check_is_tpu:
        return torch_xla.runtime.device_type() == "TPU"

    return True
|
Check if `torch_xla` is available. To train a native pytorch job in an environment with torch xla installed, set
the `USE_TORCH_XLA` environment variable to `false`.
|
is_torch_xla_available
|
python
|
huggingface/transformers
|
src/transformers/utils/import_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/import_utils.py
|
Apache-2.0
|
def is_torch_npu_available(check_device=False):
    "Checks if `torch_npu` is installed and potentially if an NPU is in the environment"
    if not _torch_available or importlib.util.find_spec("torch_npu") is None:
        return False

    import torch
    import torch_npu  # noqa: F401

    if check_device:
        try:
            # Will raise a RuntimeError if no NPU is found
            _ = torch.npu.device_count()
            return torch.npu.is_available()
        except RuntimeError:
            return False
    return hasattr(torch, "npu") and torch.npu.is_available()
|
Checks if `torch_npu` is installed and potentially if an NPU is in the environment
|
is_torch_npu_available
|
python
|
huggingface/transformers
|
src/transformers/utils/import_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/import_utils.py
|
Apache-2.0
|
def is_torch_mlu_available(check_device=False):
    """
    Checks if `mlu` is available via a `cndev`-based check which won't trigger the drivers and leaves the mlu
    uninitialized.
    """
    if not _torch_available or importlib.util.find_spec("torch_mlu") is None:
        return False

    import torch
    import torch_mlu  # noqa: F401

    pytorch_cndev_based_mlu_check_previous_value = os.environ.get("PYTORCH_CNDEV_BASED_MLU_CHECK")
    try:
        os.environ["PYTORCH_CNDEV_BASED_MLU_CHECK"] = str(1)
        available = torch.mlu.is_available()
    finally:
        if pytorch_cndev_based_mlu_check_previous_value:
            os.environ["PYTORCH_CNDEV_BASED_MLU_CHECK"] = pytorch_cndev_based_mlu_check_previous_value
        else:
            os.environ.pop("PYTORCH_CNDEV_BASED_MLU_CHECK", None)

    return available
|
Checks if `mlu` is available via a `cndev`-based check which won't trigger the drivers and leaves the mlu
uninitialized.
|
is_torch_mlu_available
|
python
|
huggingface/transformers
|
src/transformers/utils/import_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/import_utils.py
|
Apache-2.0
|
def is_torch_musa_available(check_device=False):
    "Checks if `torch_musa` is installed and potentially if a MUSA is in the environment"
    if not _torch_available or importlib.util.find_spec("torch_musa") is None:
        return False

    import torch
    import torch_musa  # noqa: F401

    torch_musa_min_version = "0.33.0"
    if _accelerate_available and version.parse(_accelerate_version) < version.parse(torch_musa_min_version):
        return False

    if check_device:
        try:
            # Will raise a RuntimeError if no MUSA is found
            _ = torch.musa.device_count()
            return torch.musa.is_available()
        except RuntimeError:
            return False
    return hasattr(torch, "musa") and torch.musa.is_available()
|
Checks if `torch_musa` is installed and potentially if a MUSA is in the environment
|
is_torch_musa_available
|
python
|
huggingface/transformers
|
src/transformers/utils/import_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/import_utils.py
|
Apache-2.0
|
def is_torch_hpu_available():
"Checks if `torch.hpu` is available and potentially if an HPU is in the environment"
if (
not _torch_available
or importlib.util.find_spec("habana_frameworks") is None
or importlib.util.find_spec("habana_frameworks.torch") is None
):
return False
torch_hpu_min_version = "1.5.0"
if _accelerate_available and version.parse(_accelerate_version) < version.parse(torch_hpu_min_version):
return False
import torch
if os.environ.get("PT_HPU_LAZY_MODE", "1") == "1":
# import habana_frameworks.torch in case of lazy mode to patch torch with torch.hpu
import habana_frameworks.torch # noqa: F401
if not hasattr(torch, "hpu") or not torch.hpu.is_available():
return False
import habana_frameworks.torch.utils.experimental as htexp # noqa: F401
# IlyasMoutawwakil: We patch masked_fill_ for int64 tensors to avoid a bug on Gaudi1
# synNodeCreateWithId failed for node: masked_fill_fwd_i64 with synStatus 26 [Generic failure]
# This can be removed once Gaudi1 support is discontinued but for now we need it to keep using
# dl1.24xlarge Gaudi1 instances on AWS for testing.
# check if the device is Gaudi1 (vs Gaudi2, Gaudi3).
if htexp._get_device_type() == htexp.synDeviceType.synDeviceGaudi:
original_masked_fill_ = torch.Tensor.masked_fill_
def patched_masked_fill_(self, mask, value):
if self.dtype == torch.int64:
logger.warning_once(
"In-place tensor.masked_fill_(mask, value) is not supported for int64 tensors on Gaudi1. "
"This operation will be performed out-of-place using tensor[mask] = value."
)
self[mask] = value
else:
original_masked_fill_(self, mask, value)
torch.Tensor.masked_fill_ = patched_masked_fill_
return True
|
Checks if `torch.hpu` is available and potentially if an HPU is in the environment
|
is_torch_hpu_available
|
python
|
huggingface/transformers
|
src/transformers/utils/import_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/import_utils.py
|
Apache-2.0
|
def is_ninja_available():
    r"""
    Code comes from *torch.utils.cpp_extension.is_ninja_available()*. Returns `True` if the
    [ninja](https://ninja-build.org/) build system is available on the system, `False` otherwise.
    """
    try:
        subprocess.check_output("ninja --version".split())
    except Exception:
        return False
    else:
        return True
|
Code comes from *torch.utils.cpp_extension.is_ninja_available()*. Returns `True` if the
[ninja](https://ninja-build.org/) build system is available on the system, `False` otherwise.
|
is_ninja_available
|
python
|
huggingface/transformers
|
src/transformers/utils/import_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/import_utils.py
|
Apache-2.0
|
def is_torch_xpu_available(check_device=False):
    """
    Checks if XPU acceleration is available either via native PyTorch (>=2.6), `intel_extension_for_pytorch`, or via
    stock PyTorch (>=2.4), and potentially if an XPU is in the environment.
    """
    if not is_torch_available():
        return False

    torch_version = version.parse(_torch_version)
    if torch_version.major == 2 and torch_version.minor < 6:
        if is_ipex_available():
            import intel_extension_for_pytorch  # noqa: F401
        elif torch_version.major == 2 and torch_version.minor < 4:
            return False

    import torch

    if check_device:
        try:
            # Will raise a RuntimeError if no XPU is found
            _ = torch.xpu.device_count()
            return torch.xpu.is_available()
        except RuntimeError:
            return False
    return hasattr(torch, "xpu") and torch.xpu.is_available()
|
Checks if XPU acceleration is available either via native PyTorch (>=2.6),
`intel_extension_for_pytorch`, or via stock PyTorch (>=2.4), and potentially
if an XPU is in the environment.
|
is_torch_xpu_available
|
python
|
huggingface/transformers
|
src/transformers/utils/import_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/import_utils.py
|
Apache-2.0
|
def is_torch_greater_or_equal(library_version: str, accept_dev: bool = False):
    """
    Accepts a library version and returns True if the current version of the library is greater than or equal to the
    given version. If `accept_dev` is True, it will also accept development versions (e.g. 2.7.0.dev20250320 matches
    2.7.0).
    """
    if not _is_package_available("torch"):
        return False

    if accept_dev:
        return version.parse(version.parse(importlib.metadata.version("torch")).base_version) >= version.parse(
            library_version
        )
    else:
        return version.parse(importlib.metadata.version("torch")) >= version.parse(library_version)
|
Accepts a library version and returns True if the current version of the library is greater than or equal to the
given version. If `accept_dev` is True, it will also accept development versions (e.g. 2.7.0.dev20250320 matches
2.7.0).
|
is_torch_greater_or_equal
|
python
|
huggingface/transformers
|
src/transformers/utils/import_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/import_utils.py
|
Apache-2.0
|
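A usage sketch of the version gate above, with an arbitrary version string chosen for illustration.

```python
from transformers.utils.import_utils import is_torch_greater_or_equal

# Hedged sketch: returns False when torch is absent, so it is safe to call unconditionally.
if is_torch_greater_or_equal("2.1", accept_dev=True):
    print("torch >= 2.1 (dev builds accepted)")
else:
    print("torch missing or older than 2.1")
```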
def direct_transformers_import(path: str, file="__init__.py") -> ModuleType:
    """Imports transformers directly

    Args:
        path (`str`): The path to the source file
        file (`str`, *optional*): The file to join with the path. Defaults to "__init__.py".

    Returns:
        `ModuleType`: The resulting imported module
    """
    name = "transformers"
    location = os.path.join(path, file)
    spec = importlib.util.spec_from_file_location(name, location, submodule_search_locations=[path])
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    module = sys.modules[name]
    return module
|
Imports transformers directly
Args:
path (`str`): The path to the source file
file (`str`, *optional*): The file to join with the path. Defaults to "__init__.py".
Returns:
`ModuleType`: The resulting imported module
|
direct_transformers_import
|
python
|
huggingface/transformers
|
src/transformers/utils/import_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/import_utils.py
|
Apache-2.0
|
def requires(*, backends=()):
    """
    This decorator enables two things:
    - Attaching a `__backends` tuple to an object, so the backends required for it to execute correctly can be
      inspected without instantiating it
    - The '@requires' string is used to dynamically import objects
    """
    if not isinstance(backends, tuple):
        raise ValueError("Backends should be a tuple.")

    applied_backends = []
    for backend in backends:
        if backend in BACKENDS_MAPPING:
            applied_backends.append(backend)
        else:
            if any(key in backend for key in ["=", "<", ">"]):
                applied_backends.append(Backend(backend))
            else:
                raise ValueError(f"Backend should be defined in the BACKENDS_MAPPING. Offending backend: {backend}")

    def inner_fn(fun):
        fun.__backends = applied_backends
        return fun

    return inner_fn
|
This decorator enables two things:
- Attaching a `__backends` tuple to an object, so the backends required for it to execute correctly can be
inspected without instantiating it
- The '@requires' string is used to dynamically import objects
|
requires
|
python
|
huggingface/transformers
|
src/transformers/utils/import_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/import_utils.py
|
Apache-2.0
|
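A small sketch of the decorator above; it assumes `"torch"` is a key of `BACKENDS_MAPPING` (the standard backend names used throughout transformers).

```python
from transformers.utils.import_utils import requires


@requires(backends=("torch",))
def needs_torch():
    return "ran"


# The decorator only records the requirement; it does not block the call itself.
print(needs_torch())           # "ran"
print(needs_torch.__backends)  # ['torch']
```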
def fetch__all__(file_content):
    """
    Returns the content of the __all__ variable in the file content.
    Returns an empty list if it is not defined, otherwise returns a list of strings.
    """
    if "__all__" not in file_content:
        return []

    start_index = None
    lines = file_content.splitlines()
    for index, line in enumerate(lines):
        if line.startswith("__all__"):
            start_index = index

    # There is no line starting with `__all__`
    if start_index is None:
        return []

    lines = lines[start_index:]

    if not lines[0].startswith("__all__"):
        raise ValueError(
            "fetch__all__ accepts a list of lines, with the first line being the __all__ variable declaration"
        )

    # __all__ is defined on a single line
    if lines[0].endswith("]"):
        return [obj.strip("\"' ") for obj in lines[0].split("=")[1].strip(" []").split(",")]

    # __all__ is defined on multiple lines
    else:
        _all = []
        for __all__line_index in range(1, len(lines)):
            if lines[__all__line_index].strip() == "]":
                return _all
            else:
                _all.append(lines[__all__line_index].strip("\"', "))

        return _all
|
Returns the content of the __all__ variable in the file content.
Returns an empty list if it is not defined, otherwise returns a list of strings.
|
fetch__all__
|
python
|
huggingface/transformers
|
src/transformers/utils/import_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/import_utils.py
|
Apache-2.0
|
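Two worked inputs for the parser above, covering the single-line and multi-line `__all__` forms.

```python
from transformers.utils.import_utils import fetch__all__

single = '__all__ = ["Foo", "Bar"]\n'
multi = '__all__ = [\n    "Foo",\n    "Bar",\n]\n'

print(fetch__all__(single))   # ['Foo', 'Bar']
print(fetch__all__(multi))    # ['Foo', 'Bar']
print(fetch__all__("x = 1"))  # []  (no __all__ defined)
```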
def create_import_structure_from_path(module_path):
"""
This method takes the path to a file/a folder and returns the import structure.
If a file is given, it will return the import structure of the parent folder.
Import structures are designed to be digestible by `_LazyModule` objects. They are
created from the __all__ definitions in each file as well as the `@requires` decorators
above methods and objects.
The import structure allows explicit display of the required backends for a given object.
These backends are specified in two ways:
1. Through their `@requires`, if they are exported with that decorator. This `@requires` decorator
accepts a `backends` tuple kwarg mentioning which backends are required to run this object.
2. If an object is defined in a file with "default" backends, it will have, at a minimum, this
backend specified. The default backends are defined according to the filename:
- If a file is named like `modeling_*.py`, it will have a `torch` backend
- If a file is named like `modeling_tf_*.py`, it will have a `tf` backend
- If a file is named like `modeling_flax_*.py`, it will have a `flax` backend
- If a file is named like `tokenization_*_fast.py`, it will have a `tokenizers` backend
- If a file is named like `image_processing*_fast.py`, it will have a `torchvision` + `torch` backend
Backends serve the purpose of displaying a clear error message to the user in case the backends are not installed.
Should an object be imported without its required backends being in the environment, any attempt to use the
object will raise an error mentioning which backend(s) should be added to the environment in order to use
that object.
Here's an example of an input import structure at the src.transformers.models level:
{
'albert': {
frozenset(): {
'configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'}
},
frozenset({'tokenizers'}): {
'tokenization_albert_fast': {'AlbertTokenizerFast'}
},
},
'align': {
frozenset(): {
'configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'},
'processing_align': {'AlignProcessor'}
},
},
'altclip': {
frozenset(): {
'configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'},
'processing_altclip': {'AltCLIPProcessor'},
}
}
}
"""
import_structure = {}
if os.path.isfile(module_path):
module_path = os.path.dirname(module_path)
directory = module_path
adjacent_modules = []
for f in os.listdir(module_path):
if f != "__pycache__" and os.path.isdir(os.path.join(module_path, f)):
import_structure[f] = create_import_structure_from_path(os.path.join(module_path, f))
elif not os.path.isdir(os.path.join(directory, f)):
adjacent_modules.append(f)
# We're only taking a look at files different from __init__.py
# We could theoretically require things directly from the __init__.py
# files, but this is not supported at this time.
if "__init__.py" in adjacent_modules:
adjacent_modules.remove("__init__.py")
# Modular files should not be imported
def find_substring(substring, list_):
return any(substring in x for x in list_)
if find_substring("modular_", adjacent_modules) and find_substring("modeling_", adjacent_modules):
adjacent_modules = [module for module in adjacent_modules if "modular_" not in module]
module_requirements = {}
for module_name in adjacent_modules:
# Only modules ending in `.py` are accepted here.
if not module_name.endswith(".py"):
continue
with open(os.path.join(directory, module_name), encoding="utf-8") as f:
file_content = f.read()
# Remove the .py suffix
module_name = module_name[:-3]
previous_line = ""
previous_index = 0
# Some files have some requirements by default.
# For example, any file named `modeling_tf_xxx.py`
# should have TensorFlow as a required backend.
base_requirements = ()
for string_check, requirements in BASE_FILE_REQUIREMENTS.items():
if string_check(module_name):
base_requirements = requirements
break
# Objects that have a `@require` assigned to them will get exported
# with the backends specified in the decorator as well as the file backends.
exported_objects = set()
if "@requires" in file_content:
lines = file_content.split("\n")
for index, line in enumerate(lines):
# This allows exporting items with other decorators. We'll take a look
# at the line that follows at the same indentation level.
if line.startswith((" ", "\t", "@", ")")) and not line.startswith("@requires"):
continue
# Skipping line enables putting whatever we want between the
# export() call and the actual class/method definition.
# This is what enables having # Copied from statements, docs, etc.
skip_line = False
if "@requires" in previous_line:
skip_line = False
# Backends are defined on the same line as export
if "backends" in previous_line:
backends_string = previous_line.split("backends=")[1].split("(")[1].split(")")[0]
backends = tuple(sorted([b.strip("'\",") for b in backends_string.split(", ") if b]))
# Backends are defined in the lines following export, for example such as:
# @export(
# backends=(
# "sentencepiece",
# "torch",
# "tf",
# )
# )
#
# or
#
# @export(
# backends=(
# "sentencepiece", "tf"
# )
# )
elif "backends" in lines[previous_index + 1]:
backends = []
for backend_line in lines[previous_index:index]:
if "backends" in backend_line:
backend_line = backend_line.split("=")[1]
if '"' in backend_line or "'" in backend_line:
if ", " in backend_line:
backends.extend(backend.strip("()\"', ") for backend in backend_line.split(", "))
else:
backends.append(backend_line.strip("()\"', "))
# If the line is only a ')', then we reached the end of the backends and we break.
if backend_line.strip() == ")":
break
backends = tuple(backends)
# No backends are registered for export
else:
backends = ()
backends = frozenset(backends + base_requirements)
if backends not in module_requirements:
module_requirements[backends] = {}
if module_name not in module_requirements[backends]:
module_requirements[backends][module_name] = set()
if not line.startswith("class") and not line.startswith("def"):
skip_line = True
else:
start_index = 6 if line.startswith("class") else 4
object_name = line[start_index:].split("(")[0].strip(":")
module_requirements[backends][module_name].add(object_name)
exported_objects.add(object_name)
if not skip_line:
previous_line = line
previous_index = index
# All objects that are in __all__ should be exported by default.
# These objects are exported with the file backends.
if "__all__" in file_content:
for _all_object in fetch__all__(file_content):
if _all_object not in exported_objects:
backends = frozenset(base_requirements)
if backends not in module_requirements:
module_requirements[backends] = {}
if module_name not in module_requirements[backends]:
module_requirements[backends][module_name] = set()
module_requirements[backends][module_name].add(_all_object)
import_structure = {**module_requirements, **import_structure}
return import_structure
|
This method takes the path to a file/a folder and returns the import structure.
If a file is given, it will return the import structure of the parent folder.
Import structures are designed to be digestible by `_LazyModule` objects. They are
created from the __all__ definitions in each file as well as the `@requires` decorators
above methods and objects.
The import structure allows explicit display of the required backends for a given object.
These backends are specified in two ways:
1. Through their `@requires`, if they are exported with that decorator. This `@requires` decorator
accepts a `backends` tuple kwarg mentioning which backends are required to run this object.
2. If an object is defined in a file with "default" backends, it will have, at a minimum, this
backend specified. The default backends are defined according to the filename:
- If a file is named like `modeling_*.py`, it will have a `torch` backend
- If a file is named like `modeling_tf_*.py`, it will have a `tf` backend
- If a file is named like `modeling_flax_*.py`, it will have a `flax` backend
- If a file is named like `tokenization_*_fast.py`, it will have a `tokenizers` backend
- If a file is named like `image_processing*_fast.py`, it will have a `torchvision` + `torch` backend
Backends serve the purpose of displaying a clear error message to the user in case the backends are not installed.
Should an object be imported without its required backends being in the environment, any attempt to use the
object will raise an error mentioning which backend(s) should be added to the environment in order to use
that object.
Here's an example of an input import structure at the src.transformers.models level:
{
'albert': {
frozenset(): {
'configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'}
},
frozenset({'tokenizers'}): {
'tokenization_albert_fast': {'AlbertTokenizerFast'}
},
},
'align': {
frozenset(): {
'configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'},
'processing_align': {'AlignProcessor'}
},
},
'altclip': {
frozenset(): {
'configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'},
'processing_altclip': {'AltCLIPProcessor'},
}
}
}
|
create_import_structure_from_path
|
python
|
huggingface/transformers
|
src/transformers/utils/import_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/import_utils.py
|
Apache-2.0
|
def spread_import_structure(nested_import_structure):
"""
This method takes as input an unordered import structure and brings the required backends to the top level,
aggregating modules and objects under their required backends.
Here's an example of an input import structure at the src.transformers.models level:
{
'albert': {
frozenset(): {
'configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'}
},
frozenset({'tokenizers'}): {
'tokenization_albert_fast': {'AlbertTokenizerFast'}
},
},
'align': {
frozenset(): {
'configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'},
'processing_align': {'AlignProcessor'}
},
},
'altclip': {
frozenset(): {
'configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'},
'processing_altclip': {'AltCLIPProcessor'},
}
}
}
Here's an example of an output import structure at the src.transformers.models level:
{
frozenset({'tokenizers'}): {
'albert.tokenization_albert_fast': {'AlbertTokenizerFast'}
},
frozenset(): {
'albert.configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'},
'align.processing_align': {'AlignProcessor'},
'align.configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'},
'altclip.configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'},
'altclip.processing_altclip': {'AltCLIPProcessor'}
}
}
"""
def propagate_frozenset(unordered_import_structure):
frozenset_first_import_structure = {}
for _key, _value in unordered_import_structure.items():
# If the value is not a dict but a string, no need for custom manipulation
if not isinstance(_value, dict):
frozenset_first_import_structure[_key] = _value
elif any(isinstance(v, frozenset) for v in _value.keys()):
for k, v in _value.items():
if isinstance(k, frozenset):
# Here we want to switch around _key and k to propagate k upstream if it is a frozenset
if k not in frozenset_first_import_structure:
frozenset_first_import_structure[k] = {}
if _key not in frozenset_first_import_structure[k]:
frozenset_first_import_structure[k][_key] = {}
frozenset_first_import_structure[k][_key].update(v)
else:
# If k is not a frozenset, it means that the dictionary is not "level": some keys (top-level)
are frozensets, whereas some are not -> frozenset keys are at an unknown depth-level of the
# dictionary.
#
# We recursively propagate the frozenset for this specific dictionary so that the frozensets
# are at the top-level when we handle them.
propagated_frozenset = propagate_frozenset({k: v})
for r_k, r_v in propagated_frozenset.items():
if isinstance(_key, frozenset):
if r_k not in frozenset_first_import_structure:
frozenset_first_import_structure[r_k] = {}
if _key not in frozenset_first_import_structure[r_k]:
frozenset_first_import_structure[r_k][_key] = {}
# _key is a frozenset -> we switch around the r_k and _key
frozenset_first_import_structure[r_k][_key].update(r_v)
else:
if _key not in frozenset_first_import_structure:
frozenset_first_import_structure[_key] = {}
if r_k not in frozenset_first_import_structure[_key]:
frozenset_first_import_structure[_key][r_k] = {}
# _key is not a frozenset -> we keep the order of r_k and _key
frozenset_first_import_structure[_key][r_k].update(r_v)
else:
frozenset_first_import_structure[_key] = propagate_frozenset(_value)
return frozenset_first_import_structure
def flatten_dict(_dict, previous_key=None):
items = []
for _key, _value in _dict.items():
_key = f"{previous_key}.{_key}" if previous_key is not None else _key
if isinstance(_value, dict):
items.extend(flatten_dict(_value, _key).items())
else:
items.append((_key, _value))
return dict(items)
# The tuples contain the necessary backends. We want these first, so we propagate them up the
# import structure.
ordered_import_structure = nested_import_structure
# 6 is a number that gives us sufficient depth to go through all files and foreseeable folder depths
# while not taking too long to parse.
for i in range(6):
ordered_import_structure = propagate_frozenset(ordered_import_structure)
# We then flatten the dict so that it references a module path.
flattened_import_structure = {}
for key, value in ordered_import_structure.copy().items():
if isinstance(key, str):
del ordered_import_structure[key]
else:
flattened_import_structure[key] = flatten_dict(value)
return flattened_import_structure
|
This method takes as input an unordered import structure and brings the required backends to the top level,
aggregating modules and objects under their required backends.
Here's an example of an input import structure at the src.transformers.models level:
{
'albert': {
frozenset(): {
'configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'}
},
frozenset({'tokenizers'}): {
'tokenization_albert_fast': {'AlbertTokenizerFast'}
},
},
'align': {
frozenset(): {
'configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'},
'processing_align': {'AlignProcessor'}
},
},
'altclip': {
frozenset(): {
'configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'},
'processing_altclip': {'AltCLIPProcessor'},
}
}
}
Here's an example of an output import structure at the src.transformers.models level:
{
frozenset({'tokenizers'}): {
'albert.tokenization_albert_fast': {'AlbertTokenizerFast'}
},
frozenset(): {
'albert.configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'},
'align.processing_align': {'AlignProcessor'},
'align.configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'},
'altclip.configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'},
'altclip.processing_altclip': {'AltCLIPProcessor'}
}
}
|
spread_import_structure
|
python
|
huggingface/transformers
|
src/transformers/utils/import_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/import_utils.py
|
Apache-2.0
|
def define_import_structure(module_path: str, prefix: Optional[str] = None) -> IMPORT_STRUCTURE_T:
    """
    This method takes a module_path as input and creates an import structure digestible by a _LazyModule.

    Here's an example of an output import structure at the src.transformers.models level:

    {
        frozenset({'tokenizers'}): {
            'albert.tokenization_albert_fast': {'AlbertTokenizerFast'}
        },
        frozenset(): {
            'albert.configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'},
            'align.processing_align': {'AlignProcessor'},
            'align.configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'},
            'altclip.configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'},
            'altclip.processing_altclip': {'AltCLIPProcessor'}
        }
    }

    The import structure is a dict defined with frozensets as keys, and dicts of strings to sets of objects.

    If `prefix` is not None, it will add that prefix to all keys in the returned dict.
    """
    import_structure = create_import_structure_from_path(module_path)
    spread_dict = spread_import_structure(import_structure)

    if prefix is None:
        return spread_dict
    else:
        spread_dict = {k: {f"{prefix}.{kk}": vv for kk, vv in v.items()} for k, v in spread_dict.items()}
        return spread_dict
|
This method takes a module_path as input and creates an import structure digestible by a _LazyModule.
Here's an example of an output import structure at the src.transformers.models level:
{
frozenset({'tokenizers'}): {
'albert.tokenization_albert_fast': {'AlbertTokenizerFast'}
},
frozenset(): {
'albert.configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'},
'align.processing_align': {'AlignProcessor'},
'align.configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'},
'altclip.configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'},
'altclip.processing_altclip': {'AltCLIPProcessor'}
}
}
The import structure is a dict defined with frozensets as keys, and dicts of strings to sets of objects.
If `prefix` is not None, it will add that prefix to all keys in the returned dict.
|
define_import_structure
|
python
|
huggingface/transformers
|
src/transformers/utils/import_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/import_utils.py
|
Apache-2.0
|
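A minimal usage sketch for `define_import_structure`. The directory path and prefix below are illustrative assumptions (they presume a local source checkout); the contents of the returned dict depend entirely on what is found under that path.
```python
# Hypothetical sketch: build the lazy-import structure for a models/ directory.
from transformers.utils.import_utils import define_import_structure

import_structure = define_import_structure("src/transformers/models", prefix="models")

# Keys are frozensets of required backends; values map dotted module paths to object names.
for backends, modules in import_structure.items():
    print(sorted(backends) or "<no extra backend>", "->", list(modules)[:2])
```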
def clear_import_cache():
"""
Clear cached Transformers modules to allow reloading modified code.
This is useful when actively developing/modifying Transformers code.
"""
# Get all transformers modules
transformers_modules = [mod_name for mod_name in sys.modules if mod_name.startswith("transformers.")]
# Remove them from sys.modules
for mod_name in transformers_modules:
module = sys.modules[mod_name]
# Clear _LazyModule caches if applicable
if isinstance(module, _LazyModule):
module._objects = {} # Clear cached objects
del sys.modules[mod_name]
# Force reload main transformers module
if "transformers" in sys.modules:
main_module = sys.modules["transformers"]
if isinstance(main_module, _LazyModule):
main_module._objects = {} # Clear cached objects
importlib.reload(main_module)
|
Clear cached Transformers modules to allow reloading modified code.
This is useful when actively developing/modifying Transformers code.
|
clear_import_cache
|
python
|
huggingface/transformers
|
src/transformers/utils/import_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/import_utils.py
|
Apache-2.0
|
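A short sketch of `clear_import_cache` in a development loop; it assumes Transformers is installed in editable mode so that edited source files are re-executed on the next import.
```python
from transformers.utils.import_utils import clear_import_cache

clear_import_cache()      # drop every cached transformers.* module from sys.modules

import transformers       # re-import now re-executes the (possibly modified) source
print(transformers.__version__)
```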
def _get_default_logging_level():
"""
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default level. If
    it is not, fall back to `_default_log_level`.
"""
env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
f"has to be one of: {', '.join(log_levels.keys())}"
)
return _default_log_level
|
If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default level. If it is not, fall back to `_default_log_level`.
|
_get_default_logging_level
|
python
|
huggingface/transformers
|
src/transformers/utils/logging.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/logging.py
|
Apache-2.0
|
def get_logger(name: Optional[str] = None) -> logging.Logger:
"""
Return a logger with the specified name.
This function is not supposed to be directly accessed unless you are writing a custom transformers module.
"""
if name is None:
name = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(name)
|
Return a logger with the specified name.
This function is not supposed to be directly accessed unless you are writing a custom transformers module.
|
get_logger
|
python
|
huggingface/transformers
|
src/transformers/utils/logging.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/logging.py
|
Apache-2.0
|
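A typical pattern for obtaining and using a library-scoped logger; the logger name below is illustrative.
```python
from transformers.utils import logging

logging.set_verbosity_info()  # make INFO-level messages visible

# Child loggers of "transformers" propagate to the library root logger and its handler.
logger = logging.get_logger("transformers.my_custom_module")  # illustrative name
logger.info("Loading custom weights ...")
```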
def disable_default_handler() -> None:
"""Disable the default handler of the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler)
|
Disable the default handler of the HuggingFace Transformers's root logger.
|
disable_default_handler
|
python
|
huggingface/transformers
|
src/transformers/utils/logging.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/logging.py
|
Apache-2.0
|
def enable_default_handler() -> None:
"""Enable the default handler of the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler)
|
Enable the default handler of the HuggingFace Transformers's root logger.
|
enable_default_handler
|
python
|
huggingface/transformers
|
src/transformers/utils/logging.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/logging.py
|
Apache-2.0
|
def add_handler(handler: logging.Handler) -> None:
"""adds a handler to the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(handler)
|
adds a handler to the HuggingFace Transformers's root logger.
|
add_handler
|
python
|
huggingface/transformers
|
src/transformers/utils/logging.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/logging.py
|
Apache-2.0
|
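A sketch of attaching an extra handler to the library root logger with `add_handler`; the log file name is arbitrary.
```python
import logging
from transformers.utils import logging as hf_logging

file_handler = logging.FileHandler("transformers_debug.log")  # arbitrary log file
file_handler.setLevel(logging.DEBUG)

hf_logging.add_handler(file_handler)     # library logs are now also written to the file
# ... run some transformers code ...
hf_logging.remove_handler(file_handler)  # detach the handler again when done
```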
def remove_handler(handler: logging.Handler) -> None:
"""removes given handler from the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
    assert handler is not None and handler in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(handler)
|
removes given handler from the HuggingFace Transformers's root logger.
|
remove_handler
|
python
|
huggingface/transformers
|
src/transformers/utils/logging.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/logging.py
|
Apache-2.0
|
def disable_propagation() -> None:
"""
Disable propagation of the library log outputs. Note that log propagation is disabled by default.
"""
_configure_library_root_logger()
_get_library_root_logger().propagate = False
|
Disable propagation of the library log outputs. Note that log propagation is disabled by default.
|
disable_propagation
|
python
|
huggingface/transformers
|
src/transformers/utils/logging.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/logging.py
|
Apache-2.0
|
def enable_propagation() -> None:
"""
Enable propagation of the library log outputs. Please disable the HuggingFace Transformers's default handler to
prevent double logging if the root logger has been configured.
"""
_configure_library_root_logger()
_get_library_root_logger().propagate = True
|
Enable propagation of the library log outputs. Please disable the HuggingFace Transformers's default handler to
prevent double logging if the root logger has been configured.
|
enable_propagation
|
python
|
huggingface/transformers
|
src/transformers/utils/logging.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/logging.py
|
Apache-2.0
|
def enable_explicit_format() -> None:
"""
Enable explicit formatting for every HuggingFace Transformers's logger. The explicit formatter is as follows:
```
[LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE
```
All handlers currently bound to the root logger are affected by this method.
"""
handlers = _get_library_root_logger().handlers
for handler in handlers:
formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
handler.setFormatter(formatter)
|
Enable explicit formatting for every HuggingFace Transformers's logger. The explicit formatter is as follows:
```
[LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE
```
All handlers currently bound to the root logger are affected by this method.
|
enable_explicit_format
|
python
|
huggingface/transformers
|
src/transformers/utils/logging.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/logging.py
|
Apache-2.0
|
def reset_format() -> None:
"""
Resets the formatting for HuggingFace Transformers's loggers.
All handlers currently bound to the root logger are affected by this method.
"""
handlers = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(None)
|
Resets the formatting for HuggingFace Transformers's loggers.
All handlers currently bound to the root logger are affected by this method.
|
reset_format
|
python
|
huggingface/transformers
|
src/transformers/utils/logging.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/logging.py
|
Apache-2.0
|
def warning_advice(self, *args, **kwargs):
"""
This method is identical to `logger.warning()`, but if env var TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set, this
warning will not be printed
"""
no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
if no_advisory_warnings:
return
self.warning(*args, **kwargs)
|
This method is identical to `logger.warning()`, but if env var TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set, this
warning will not be printed
|
warning_advice
|
python
|
huggingface/transformers
|
src/transformers/utils/logging.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/logging.py
|
Apache-2.0
|
def attach_tracer(tracer_name_template=None):
"""
Decorator that attaches a tracer to a class.
This decorator should be applied to classes that need OpenTelemetry tracing.
It adds a tracer attribute to the class instance that can be used by the traced decorator.
Args:
tracer_name_template: Optional template string for the tracer name.
If provided, it should contain {module} which will be replaced with the class's full module path
and {class_name} for the class name.
If None, a default naming scheme will be used where:
- If the module already starts with "transformers.", it will use that directly
- Otherwise, it will prepend "transformers." to the module name
Returns:
Class decorator function
"""
if not _has_opentelemetry:
return lambda cls: cls
def decorator(cls):
original_init = cls.__init__
@functools.wraps(original_init)
def init_with_tracer(self, *args, **kwargs):
original_init(self, *args, **kwargs)
module_name = cls.__module__
class_name = cls.__qualname__
if tracer_name_template is None:
if module_name.startswith("transformers."):
tracer_name = f"{module_name}.{class_name}"
else:
tracer_name = f"transformers.{module_name}.{class_name}"
else:
tracer_name = tracer_name_template.format(module=module_name, class_name=class_name)
self.tracer = get_tracer(tracer_name)
cls.__init__ = init_with_tracer
return cls
return decorator
|
Decorator that attaches a tracer to a class.
This decorator should be applied to classes that need OpenTelemetry tracing.
It adds a tracer attribute to the class instance that can be used by the traced decorator.
Args:
tracer_name_template: Optional template string for the tracer name.
If provided, it should contain {module} which will be replaced with the class's full module path
and {class_name} for the class name.
If None, a default naming scheme will be used where:
- If the module already starts with "transformers.", it will use that directly
- Otherwise, it will prepend "transformers." to the module name
Returns:
Class decorator function
|
attach_tracer
|
python
|
huggingface/transformers
|
src/transformers/utils/metrics.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/metrics.py
|
Apache-2.0
|
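A hedged sketch combining `attach_tracer` with the `traced` decorator defined next. The class and method names are invented for illustration; when OpenTelemetry is not installed, both decorators are no-ops.
```python
from transformers.utils.metrics import attach_tracer, traced

@attach_tracer()                       # instances receive a self.tracer at __init__ time
class BatchScheduler:                  # illustrative class, not part of the library
    def __init__(self, max_batch_tokens):
        self.max_batch_tokens = max_batch_tokens

    @traced(span_name="schedule_step")
    def step(self, n_requests):
        return min(n_requests, self.max_batch_tokens)

scheduler = BatchScheduler(max_batch_tokens=256)
scheduler.step(8)   # emits a "schedule_step" span when OpenTelemetry is available
```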
def traced(
func=None,
*,
span_name=None,
standalone=False,
additional_attributes: Optional[List[Tuple[str, str, Union[Any, Callable[[Any], Any]]]]] = None,
):
"""
Decorator to trace function calls with OpenTelemetry.
Can be used as @traced or @traced(span_name="custom_name")
Args:
func: The function to trace
span_name: Optional custom name for the span (defaults to function name)
standalone: If True, creates a parentless span
additional_attributes: Optional list of additional attributes to set on the span.
Each item is a tuple of (instance_attribute_name, span_attribute_key, value_or_transform_function)
where:
- instance_attribute_name: Name of the attribute to get from the class instance
- span_attribute_key: Key to use when setting the attribute on the span
- value_or_transform_function: Either a raw value to use directly, or a function to transform
the attribute value before setting it on the span
Returns:
Decorated function with tracing
"""
def decorator(func):
if not _has_opentelemetry:
return func
import functools
@functools.wraps(func)
def wrapper(*args, **kwargs):
instance = args[0] if args and (hasattr(func, "__self__") and func.__self__ is not None) else None
is_method = instance is not None
if is_method and hasattr(instance, "tracer"):
tracer = instance.tracer
else:
tracer = get_tracer(f"transformers.{func.__module__}.{func.__name__}")
name = span_name or func.__name__
span_fn = tracer.start_span if standalone else tracer.start_as_current_span
with span_fn(name) as span:
span.set_attribute("function.name", func.__name__)
span.set_attribute("function.module", func.__module__)
span.set_attribute("function.is_method", is_method)
if args:
for i, arg in enumerate(args):
if isinstance(arg, (str, int, float, bool)) or arg is None:
span.set_attribute(f"args.{i}", str(arg))
else:
span.set_attribute(f"args.{i}", str(type(arg)))
if kwargs:
for key, value in kwargs.items():
if isinstance(value, (str, int, float, bool)) or value is None:
span.set_attribute(f"kwargs.{key}", str(value))
else:
span.set_attribute(f"kwargs.{key}", str(type(value)))
if additional_attributes and is_method:
for attr_config in additional_attributes:
instance_attribute_name, span_attribute_key, value_or_transform_function = attr_config
if hasattr(instance, instance_attribute_name):
attribute_value = getattr(instance, instance_attribute_name)
if callable(value_or_transform_function):
transformed_value = value_or_transform_function(attribute_value)
else:
transformed_value = value_or_transform_function
span.set_attribute(span_attribute_key, transformed_value)
try:
result = func(*args, **kwargs)
return result
except Exception as e:
span.set_status(Status(StatusCode.ERROR))
span.record_exception(e)
raise
return wrapper
if func is None:
return decorator
return decorator(func)
|
Decorator to trace function calls with OpenTelemetry.
Can be used as @traced or @traced(span_name="custom_name")
Args:
func: The function to trace
span_name: Optional custom name for the span (defaults to function name)
standalone: If True, creates a parentless span
additional_attributes: Optional list of additional attributes to set on the span.
Each item is a tuple of (instance_attribute_name, span_attribute_key, value_or_transform_function)
where:
- instance_attribute_name: Name of the attribute to get from the class instance
- span_attribute_key: Key to use when setting the attribute on the span
- value_or_transform_function: Either a raw value to use directly, or a function to transform
the attribute value before setting it on the span
Returns:
Decorated function with tracing
|
traced
|
python
|
huggingface/transformers
|
src/transformers/utils/metrics.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/metrics.py
|
Apache-2.0
|
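A minimal sketch of `traced` on free functions; the function names are illustrative, and without OpenTelemetry installed the decorator simply returns the function unchanged.
```python
from transformers.utils.metrics import traced

@traced                               # span name defaults to the function name
def preprocess(batch_size: int):
    return [0] * batch_size

@traced(span_name="tokenize_batch")   # custom span name
def tokenize(texts):
    return [t.split() for t in texts]

preprocess(4)
tokenize(["hello world", "lazy imports"])
```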
def _setup_metrics(self):
"""Initialize OpenTelemetry metrics and tracing if the library is available."""
if not _has_opentelemetry:
logger.info("OpenTelemetry is not installed. Metrics and tracing will not be recorded.")
return
self.meter = metrics.get_meter("transformers.generation.continuous_batch_processor")
# Define appropriate buckets for TTFT (typically ranges from ~50ms to several seconds)
ttft_buckets = [10, 25, 50, 75, 100, 150, 200, 300, 500, 750, 1000, 2000, 5000, 10000]
self.ttft_histogram = self.meter.create_histogram(
name="ttft_milliseconds",
description="Time to first token in milliseconds",
unit="ms",
explicit_bucket_boundaries_advisory=ttft_buckets,
)
self.active_requests_gauge = self.meter.create_gauge(
name="active_requests_count",
description="Number of active requests currently being processed",
unit="requests",
)
self.waiting_requests_gauge = self.meter.create_gauge(
name="waiting_requests_count",
description="Number of requests waiting to be processed",
unit="requests",
)
# Define appropriate buckets for request latency (similar to TTFT but with higher upper bounds)
latency_buckets = [50, 100, 250, 500, 1000, 2000, 5000, 10000, 20000, 30000, 60000]
self.request_latency_histogram = self.meter.create_histogram(
name="request_latency_milliseconds",
description="End-to-end latency for completed requests in milliseconds",
unit="ms",
explicit_bucket_boundaries_advisory=latency_buckets,
)
self.decode_prefill_ratio_gauge = self.meter.create_gauge(
name="decode_prefill_ratio",
description="Ratio of decode tokens to prefill tokens in a batch",
unit="ratio",
)
self.prefill_tokens_counter = self.meter.create_counter(
name="prefill_tokens_processed",
description="Number of prefill tokens processed",
unit="tokens",
)
self.decode_tokens_counter = self.meter.create_counter(
name="decode_tokens_processed",
description="Number of decode tokens processed",
unit="tokens",
)
# Define appropriate buckets for batch fill percentage (0-100%)
batch_fill_buckets = [5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 98, 100]
self.batch_fill_percentage_histogram = self.meter.create_histogram(
name="batch_fill_percentage",
description="Percentage of max_batch_tokens utilized in each batch",
unit="percent",
explicit_bucket_boundaries_advisory=batch_fill_buckets,
)
self.kv_cache_free_memory_gauge = self.meter.create_gauge(
name="kv_cache_free_memory_bytes",
description="Free memory of the PagedAttentionCache in bytes",
unit="bytes",
)
self.kv_cache_memory_gauge = self.meter.create_gauge(
name="kv_cache_memory_bytes",
description="Memory usage of the PagedAttentionCache in bytes",
unit="bytes",
)
|
Initialize OpenTelemetry metrics and tracing if the library is available.
|
_setup_metrics
|
python
|
huggingface/transformers
|
src/transformers/utils/metrics.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/metrics.py
|
Apache-2.0
|
def record_ttft_metric(self, created_time: float, request_id: str) -> None:
"""Record Time to First Token (TTFT).
Args:
created_time: The time the request was created
request_id: The ID of the request
"""
if not _has_opentelemetry:
return
ttft_ms = (time.time() - created_time) * 1000.0
try:
self.ttft_histogram.record(ttft_ms)
logger.debug(f"Recorded TTFT for request {request_id}: {ttft_ms:.2f}ms")
except Exception as e:
logger.warning(f"Failed to record TTFT metric: {e}")
|
Record Time to First Token (TTFT).
Args:
created_time: The time the request was created
request_id: The ID of the request
|
record_ttft_metric
|
python
|
huggingface/transformers
|
src/transformers/utils/metrics.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/metrics.py
|
Apache-2.0
|
def record_batch_metrics(self, requests_in_batch: List) -> None:
"""Record metrics about the batch composition including decode/prefill ratio and batch fill percentage.
Args:
requests_in_batch: List of request states in the current batch
"""
if not _has_opentelemetry or not requests_in_batch:
return
decode_tokens = 0
prefill_tokens = 0
for state in requests_in_batch:
if state.status == RequestStatus.DECODING:
decode_tokens += 1
elif state.status in [RequestStatus.PREFILLING, RequestStatus.PREFILLING_SPLIT]:
prefill_tokens += len(state.prompt_ids)
total_batch_tokens = decode_tokens + prefill_tokens
try:
if prefill_tokens > 0:
self.prefill_tokens_counter.add(prefill_tokens)
if decode_tokens > 0:
self.decode_tokens_counter.add(decode_tokens)
if prefill_tokens > 0:
ratio = decode_tokens / prefill_tokens
self.decode_prefill_ratio_gauge.set(ratio)
fill_percentage = (total_batch_tokens / self.max_batch_tokens) * 100.0
self.batch_fill_percentage_histogram.record(fill_percentage)
logger.debug(
f"Batch metrics: {decode_tokens} decode tokens, {prefill_tokens} prefill tokens, "
f"batch fill: {fill_percentage:.2f}% ({total_batch_tokens}/{self.max_batch_tokens})"
)
except Exception as e:
logger.warning(f"Failed to record batch metrics: {e}")
|
Record metrics about the batch composition including decode/prefill ratio and batch fill percentage.
Args:
requests_in_batch: List of request states in the current batch
|
record_batch_metrics
|
python
|
huggingface/transformers
|
src/transformers/utils/metrics.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/metrics.py
|
Apache-2.0
|
def record_kv_cache_memory_metrics(self, cache) -> None:
"""Record memory usage of the PagedAttentionCache without GPU synchronization.
This calculates the theoretical memory usage based on cache configuration
and the number of blocks currently in use.
Args:
cache: The PagedAttentionCache object to measure
"""
if not _has_opentelemetry:
return
try:
# Calculate memory usage based on cache configuration
num_used_blocks = cache.num_blocks - len(cache._free_blocks)
num_layers = len(cache.key_cache)
# Each used block stores key and value states
# Each with shape: (num_kv_heads, block_size, head_dim)
bytes_per_parameter = 2 if cache.dtype in [torch.float16, torch.bfloat16] else 4 # Size in bytes
# Total bytes = num_layers * num_used_blocks * block_size *
# num_kv_heads * head_dim * 2 (both K and V) * bytes_per_parameter
memory_bytes = (
num_layers
* num_used_blocks
* cache.block_size
* cache.num_key_value_heads
* cache.head_dim
* 2 # For both key and value caches
* bytes_per_parameter
)
free_memory_bytes = (
num_layers
* len(cache._free_blocks)
* cache.block_size
* cache.num_key_value_heads
* cache.head_dim
* 2 # For both key and value caches
* bytes_per_parameter
)
self.kv_cache_memory_gauge.set(memory_bytes)
self.kv_cache_free_memory_gauge.set(free_memory_bytes)
logger.debug(
f"KV Cache memory: {memory_bytes / (1024 * 1024):.2f}MB, "
f"Used blocks: {num_used_blocks}/{cache.num_blocks} "
f"({num_used_blocks / cache.num_blocks * 100:.1f}%)"
)
except Exception as e:
logger.warning(f"Failed to record KV cache memory metrics: {e}")
|
Record memory usage of the PagedAttentionCache without GPU synchronization.
This calculates the theoretical memory usage based on cache configuration
and the number of blocks currently in use.
Args:
cache: The PagedAttentionCache object to measure
|
record_kv_cache_memory_metrics
|
python
|
huggingface/transformers
|
src/transformers/utils/metrics.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/metrics.py
|
Apache-2.0
|
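A small worked example of the memory formula used in `record_kv_cache_memory_metrics`, with made-up cache dimensions; it only reproduces the arithmetic, not a real `PagedAttentionCache`.
```python
# Hypothetical cache geometry (not read from an actual PagedAttentionCache).
num_layers = 32
num_used_blocks = 100
block_size = 16
num_key_value_heads = 8
head_dim = 128
bytes_per_parameter = 2  # float16 / bfloat16

memory_bytes = (
    num_layers * num_used_blocks * block_size
    * num_key_value_heads * head_dim
    * 2  # both key and value caches
    * bytes_per_parameter
)
print(f"{memory_bytes / (1024 * 1024):.2f} MB")  # 200.00 MB
```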
def record_queue_metrics(self, active_requests: int, waiting_requests: int) -> None:
"""Record metrics about active and waiting requests.
Args:
active_requests: Number of active requests
waiting_requests: Number of waiting requests
"""
if not _has_opentelemetry:
return
try:
self.active_requests_gauge.set(active_requests)
self.waiting_requests_gauge.set(waiting_requests)
logger.debug(f"Queue metrics: {active_requests} active requests, {waiting_requests} waiting requests")
except Exception as e:
logger.warning(f"Failed to record queue metrics: {e}")
|
Record metrics about active and waiting requests.
Args:
active_requests: Number of active requests
waiting_requests: Number of waiting requests
|
record_queue_metrics
|
python
|
huggingface/transformers
|
src/transformers/utils/metrics.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/metrics.py
|
Apache-2.0
|
def record_request_completion(self, created_time: float, request_id: str) -> None:
"""Record metrics about a completed request.
Args:
created_time: The time the request was created
request_id: The ID of the request
"""
if not _has_opentelemetry:
return
latency_ms = (time.time() - created_time) * 1000.0
try:
self.request_latency_histogram.record(latency_ms)
logger.debug(f"Recorded request completion for {request_id}: {latency_ms:.2f}ms")
except Exception as e:
logger.warning(f"Failed to record request completion metric: {e}")
|
Record metrics about a completed request.
Args:
created_time: The time the request was created
request_id: The ID of the request
|
record_request_completion
|
python
|
huggingface/transformers
|
src/transformers/utils/metrics.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/metrics.py
|
Apache-2.0
|
def get_device_map(n_layers, devices):
"""Returns a dictionary of layers distributed evenly across all devices."""
layers = list(range(n_layers))
n_blocks = int(ceil(n_layers / len(devices)))
layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
return dict(zip(devices, layers_list))
|
Returns a dictionary of layers distributed evenly across all devices.
|
get_device_map
|
python
|
huggingface/transformers
|
src/transformers/utils/model_parallel_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/model_parallel_utils.py
|
Apache-2.0
|
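A quick worked example of `get_device_map`; the device identifiers are arbitrary.
```python
from transformers.utils.model_parallel_utils import get_device_map

# 10 layers over 3 devices -> blocks of ceil(10 / 3) = 4 layers each
device_map = get_device_map(n_layers=10, devices=[0, 1, 2])
print(device_map)  # {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9]}
```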
def format_time(t):
"Format `t` (in seconds) to (h):mm:ss"
t = int(t)
h, m, s = t // 3600, (t // 60) % 60, t % 60
return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
|
Format `t` (in seconds) to (h):mm:ss
|
format_time
|
python
|
huggingface/transformers
|
src/transformers/utils/notebook.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/notebook.py
|
Apache-2.0
|
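Two quick examples of `format_time`'s output; the hour component is dropped when it is zero.
```python
from transformers.utils.notebook import format_time

print(format_time(75))    # "01:15"
print(format_time(3725))  # "1:02:05"
```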
def text_to_html_table(items):
"Put the texts in `items` in an HTML table."
html_code = """<table border="1" class="dataframe">\n"""
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f" <th>{i}</th>\n"
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
html_code += f" <td>{elt}</td>\n"
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
|
Put the texts in `items` in an HTML table.
|
text_to_html_table
|
python
|
huggingface/transformers
|
src/transformers/utils/notebook.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/notebook.py
|
Apache-2.0
|
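A small example of `text_to_html_table`; the first row is treated as the header and float cells are rendered with six decimals.
```python
from transformers.utils.notebook import text_to_html_table

rows = [
    ["Epoch", "Training Loss"],  # header row
    [1, 0.523],
    [2, 0.31],
]
html = text_to_html_table(rows)
print(html)  # <table ...> with cell values "0.523000" and "0.310000"
```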
def update(self, value: int, force_update: bool = False, comment: Optional[str] = None):
"""
The main method to update the progress bar to `value`.
Args:
value (`int`):
The value to use. Must be between 0 and `total`.
force_update (`bool`, *optional*, defaults to `False`):
                Whether or not to force an update of the internal state and display (by default, the bar will wait for
                `value` to reach the point it predicted corresponds to at least `update_every` seconds since the last
                update, to avoid refreshing the display too often).
comment (`str`, *optional*):
A comment to add on the left of the progress bar.
"""
self.value = value
if comment is not None:
self.comment = comment
if self.last_value is None:
self.start_time = self.last_time = time.time()
self.start_value = self.last_value = value
self.elapsed_time = self.predicted_remaining = None
self.first_calls = self.warmup
self.wait_for = 1
self.update_bar(value)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
if self.first_calls > 0:
self.first_calls -= 1
current_time = time.time()
self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
self.average_time_per_item = self.elapsed_time / (value - self.start_value)
else:
self.average_time_per_item = None
if value >= self.total:
value = self.total
self.predicted_remaining = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
self.predicted_remaining = self.average_time_per_item * (self.total - value)
self.update_bar(value)
self.last_value = value
self.last_time = current_time
if (self.average_time_per_item is None) or (self.average_time_per_item == 0):
self.wait_for = 1
else:
self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
|
The main method to update the progress bar to `value`.
Args:
value (`int`):
The value to use. Must be between 0 and `total`.
force_update (`bool`, *optional*, defaults to `False`):
        Whether or not to force an update of the internal state and display (by default, the bar will wait for
        `value` to reach the point it predicted corresponds to at least `update_every` seconds since the last
        update, to avoid refreshing the display too often).
comment (`str`, *optional*):
A comment to add on the left of the progress bar.
|
update
|
python
|
huggingface/transformers
|
src/transformers/utils/notebook.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/notebook.py
|
Apache-2.0
|
def write_line(self, values):
"""
Write the values in the inner table.
Args:
values (`Dict[str, float]`): The values to display.
"""
if self.inner_table is None:
self.inner_table = [list(values.keys()), list(values.values())]
else:
columns = self.inner_table[0]
for key in values.keys():
if key not in columns:
columns.append(key)
self.inner_table[0] = columns
if len(self.inner_table) > 1:
last_values = self.inner_table[-1]
first_column = self.inner_table[0][0]
if last_values[0] != values[first_column]:
# write new line
self.inner_table.append([values[c] if c in values else "No Log" for c in columns])
else:
# update last line
new_values = values
for c in columns:
if c not in new_values.keys():
new_values[c] = last_values[columns.index(c)]
self.inner_table[-1] = [new_values[c] for c in columns]
else:
self.inner_table.append([values[c] for c in columns])
|
Write the values in the inner table.
Args:
values (`Dict[str, float]`): The values to display.
|
write_line
|
python
|
huggingface/transformers
|
src/transformers/utils/notebook.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/notebook.py
|
Apache-2.0
|
def check_peft_version(min_version: str) -> None:
r"""
Checks if the version of PEFT is compatible.
Args:
version (`str`):
The version of PEFT to check against.
"""
if not is_peft_available():
raise ValueError("PEFT is not installed. Please install it with `pip install peft`")
is_peft_version_compatible = version.parse(importlib.metadata.version("peft")) >= version.parse(min_version)
if not is_peft_version_compatible:
raise ValueError(
f"The version of PEFT you are using is not compatible, please use a version that is greater"
f" than {min_version}"
)
|
Checks if the version of PEFT is compatible.
Args:
version (`str`):
The version of PEFT to check against.
|
check_peft_version
|
python
|
huggingface/transformers
|
src/transformers/utils/peft_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/peft_utils.py
|
Apache-2.0
|
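A short sketch of guarding a code path on a minimum PEFT version; the version string here is an arbitrary example.
```python
from transformers.utils.peft_utils import check_peft_version

# Raises ValueError if PEFT is missing or older than the requested minimum.
check_peft_version(min_version="0.6.0")  # illustrative minimum version
```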
def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs):
"""
Instantiates a [`QuantizationConfigMixin`] from a Python dictionary of parameters.
Args:
config_dict (`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object.
return_unused_kwargs (`bool`,*optional*, defaults to `False`):
Whether or not to return a list of unused keyword arguments. Used for `from_pretrained` method in
`PreTrainedModel`.
kwargs (`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
[`QuantizationConfigMixin`]: The configuration object instantiated from those parameters.
"""
config = cls(**config_dict)
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
if return_unused_kwargs:
return config, kwargs
else:
return config
|
Instantiates a [`QuantizationConfigMixin`] from a Python dictionary of parameters.
Args:
config_dict (`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object.
return_unused_kwargs (`bool`,*optional*, defaults to `False`):
Whether or not to return a list of unused keyword arguments. Used for `from_pretrained` method in
`PreTrainedModel`.
kwargs (`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
[`QuantizationConfigMixin`]: The configuration object instantiated from those parameters.
|
from_dict
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
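A hedged sketch of `from_dict` via a concrete subclass such as `BitsAndBytesConfig` (assuming the relevant extras are installed); keys matching existing attributes are applied, the rest come back as unused kwargs. The extra keyword below is hypothetical.
```python
from transformers import BitsAndBytesConfig

config, unused = BitsAndBytesConfig.from_dict(
    {"load_in_8bit": True},
    return_unused_kwargs=True,
    llm_int8_threshold=5.0,   # matches an existing attribute -> applied to the config
    not_a_real_option=123,    # hypothetical key with no matching attribute -> returned
)
print(config.llm_int8_threshold)  # 5.0
print(unused)                     # {'not_a_real_option': 123}
```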
def to_json_file(self, json_file_path: Union[str, os.PathLike]):
"""
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
config_dict = self.to_dict()
json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
writer.write(json_string)
|
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
|
to_json_file
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def __iter__(self):
"""allows `dict(obj)` for situations where obj may be a dict or QuantizationConfigMixin"""
for attr, value in copy.deepcopy(self.__dict__).items():
yield attr, value
|
allows `dict(obj)` for situations where obj may be a dict or QuantizationConfigMixin
|
__iter__
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def to_json_string(self, use_diff: bool = True) -> str:
"""
Serializes this instance to a JSON string.
Args:
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON string.
Returns:
`str`: String containing all the attributes that make up this configuration instance in JSON format.
"""
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
|
Serializes this instance to a JSON string.
Args:
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON string.
Returns:
`str`: String containing all the attributes that make up this configuration instance in JSON format.
|
to_json_string
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def update(self, **kwargs):
"""
Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes,
returning all the unused kwargs.
Args:
kwargs (`Dict[str, Any]`):
Dictionary of attributes to tentatively update this class.
Returns:
`Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.
"""
to_remove = []
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
to_remove.append(key)
# Remove all the attributes that were updated, without modifying the input dict
unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}
return unused_kwargs
|
Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes,
returning all the unused kwargs.
Args:
kwargs (`Dict[str, Any]`):
Dictionary of attributes to tentatively update this class.
Returns:
`Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.
|
update
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
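A short sketch of `update` on a config instance; keys without a matching attribute are returned untouched. The unknown key is hypothetical.
```python
from transformers import BitsAndBytesConfig

config = BitsAndBytesConfig(load_in_8bit=True)
unused = config.update(llm_int8_threshold=4.0, unknown_key="ignored")
print(config.llm_int8_threshold)  # 4.0
print(unused)                     # {'unknown_key': 'ignored'}
```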
def post_init(self):
r"""Safety checker that arguments are correct."""
if self.bits not in [2, 3, 4, 8]:
raise ValueError(f"Only support quantization to [2,3,4,8] bits but found {self.bits}")
if self.group_size != -1 and self.group_size <= 0:
raise ValueError("group_size must be greater than 0 or equal to -1")
|
Safety checker that arguments are correct.
|
post_init
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def from_dict(cls, config: Dict[str, Any]):
"""
Override from_dict, used in AutoQuantizationConfig.from_dict in quantizers/auto.py
"""
instance = cls()
instance.quant_config = config["quant_config"]
instance.skip_modules = config["skip_modules"]
return instance
|
Override from_dict, used in AutoQuantizationConfig.from_dict in quantizers/auto.py
|
from_dict
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def to_dict(self) -> Dict[str, Any]:
"""
Serializes this instance to a Python dictionary. Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
return {
"quant_config": self.quant_config,
"quant_method": self.quant_method,
"skip_modules": self.skip_modules,
}
|
Serializes this instance to a Python dictionary. Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
|
to_dict
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def to_diff_dict(self) -> Dict[str, Any]:
"""
Removes all attributes from config which correspond to the default config attributes for better readability and
serializes to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,
"""
config_dict = self.to_dict()
# get the default config dict
default_config_dict = HqqConfig().to_dict()
serializable_config_dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
serializable_config_dict[key] = value
return serializable_config_dict
|
Removes all attributes from config which correspond to the default config attributes for better readability and
serializes to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,
|
to_diff_dict
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def post_init(self):
r"""
Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
"""
if not isinstance(self.load_in_4bit, bool):
raise TypeError("load_in_4bit must be a boolean")
if not isinstance(self.load_in_8bit, bool):
raise TypeError("load_in_8bit must be a boolean")
if not isinstance(self.llm_int8_threshold, float):
raise TypeError("llm_int8_threshold must be a float")
if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
raise TypeError("llm_int8_skip_modules must be a list of strings")
if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
raise TypeError("llm_int8_enable_fp32_cpu_offload must be a boolean")
if not isinstance(self.llm_int8_has_fp16_weight, bool):
raise TypeError("llm_int8_has_fp16_weight must be a boolean")
if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
raise TypeError("bnb_4bit_compute_dtype must be torch.dtype")
if not isinstance(self.bnb_4bit_quant_type, str):
raise TypeError("bnb_4bit_quant_type must be a string")
if not isinstance(self.bnb_4bit_use_double_quant, bool):
raise TypeError("bnb_4bit_use_double_quant must be a boolean")
if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
"0.39.0"
):
raise ValueError(
"4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
)
|
Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
|
post_init
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def quantization_method(self):
r"""
This method returns the quantization method used for the model. If the model is not quantizable, it returns
`None`.
"""
if self.load_in_8bit:
return "llm_int8"
elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
return "fp4"
elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
return "nf4"
else:
return None
|
This method returns the quantization method used for the model. If the model is not quantizable, it returns
`None`.
|
quantization_method
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
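A quick illustration of `quantization_method`, treating it as the plain method defined above; the 4-bit case assumes a recent bitsandbytes (>= 0.39.0) is installed.
```python
from transformers import BitsAndBytesConfig

print(BitsAndBytesConfig(load_in_8bit=True).quantization_method())   # "llm_int8"
print(BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4").quantization_method())  # "nf4"
print(BitsAndBytesConfig().quantization_method())                    # None
```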
def to_dict(self) -> Dict[str, Any]:
"""
Serializes this instance to a Python dictionary. Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
output["bnb_4bit_quant_storage"] = str(output["bnb_4bit_quant_storage"]).split(".")[1]
output["load_in_4bit"] = self.load_in_4bit
output["load_in_8bit"] = self.load_in_8bit
return output
|
Serializes this instance to a Python dictionary. Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
|
to_dict
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def to_diff_dict(self) -> Dict[str, Any]:
"""
Removes all attributes from config which correspond to the default config attributes for better readability and
serializes to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,
"""
config_dict = self.to_dict()
# get the default config dict
default_config_dict = BitsAndBytesConfig().to_dict()
serializable_config_dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
serializable_config_dict[key] = value
return serializable_config_dict
|
Removes all attributes from config which correspond to the default config attributes for better readability and
serializes to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,
|
to_diff_dict
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def post_init(self):
r"""
Safety checker that arguments are correct
"""
if self.bits not in [2, 3, 4, 8]:
raise ValueError(f"Only support quantization to [2,3,4,8] bits but found {self.bits}")
if self.group_size != -1 and self.group_size <= 0:
raise ValueError("group_size must be greater than 0 or equal to -1")
if not (0 < self.damp_percent < 1):
raise ValueError("damp_percent must between 0 and 1.")
if self.dataset is not None:
if isinstance(self.dataset, str):
if self.dataset in ["ptb", "ptb-new"]:
raise ValueError(
f"""{self.dataset} dataset was deprecated. You can only choose between
['wikitext2','c4','c4-new']"""
)
if self.dataset not in ["wikitext2", "c4", "c4-new"]:
raise ValueError(
f"""You have entered a string value for dataset. You can only choose between
['wikitext2','c4','c4-new'], but we found {self.dataset}"""
)
elif not isinstance(self.dataset, list):
raise ValueError(
f"""dataset needs to be either a list of string or a value in
['wikitext2','c4','c4-new'], but we found {self.dataset}"""
)
# make sure backend is back/forward compatible with both gptqmodel (full) and auto-gptq (partial)
if is_gptqmodel_available():
# convert auto-gptq control into gptqmodel backend
if self.backend is None:
self.backend = "auto_trainable" if self.use_exllama is not None and not self.use_exllama else "auto"
else:
# convert gptqmodel backend `auto_trainable` into auto-gptq control
if self.backend == "auto_trainable":
self.use_exllama = False
# auto-gptq specific kernel control logic
if self.use_exllama is None:
# New default behaviour
self.use_exllama = True
if self.exllama_config is None:
self.exllama_config = {"version": ExllamaVersion.ONE}
else:
if "version" not in self.exllama_config:
raise ValueError("`exllama_config` needs to have a `version` key.")
elif self.exllama_config["version"] not in [ExllamaVersion.ONE, ExllamaVersion.TWO]:
exllama_version = self.exllama_config["version"]
raise ValueError(
f"Only supported versions are in [ExllamaVersion.ONE, ExllamaVersion.TWO] - not recognized version {exllama_version}"
)
if self.bits == 4 and self.use_exllama:
if self.exllama_config["version"] == ExllamaVersion.ONE:
logger.info(
"You have activated exllama backend. Note that you can get better inference "
"speed using exllamav2 kernel by setting `exllama_config`."
)
elif self.exllama_config["version"] == ExllamaVersion.TWO:
if is_auto_gptq_available():
optimum_version = version.parse(importlib.metadata.version("optimum"))
autogptq_version = version.parse(importlib.metadata.version("auto_gptq"))
if optimum_version <= version.parse("1.13.2") or autogptq_version <= version.parse("0.4.2"):
raise ValueError(
f"You need optimum > 1.13.2 and auto-gptq > 0.4.2 . Make sure to have that version installed - detected version : optimum {optimum_version} and autogptq {autogptq_version}"
)
if self.modules_in_block_to_quantize is not None:
optimum_version = version.parse(importlib.metadata.version("optimum"))
if optimum_version < version.parse("1.15.0"):
raise ValueError(
"You current version of `optimum` does not support `modules_in_block_to_quantize` quantization argument, please upgrade `optimum` package to a version superior than 1.15.0 ."
)
|
Safety checker that arguments are correct
|
post_init
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def to_dict_optimum(self):
"""
Get compatible dict for optimum gptq config
"""
quant_dict = self.to_dict()
# make it compatible with optimum config
quant_dict["disable_exllama"] = not self.use_exllama
return quant_dict
|
Get compatible dict for optimum gptq config
|
to_dict_optimum
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def from_dict_optimum(cls, config_dict):
"""
Get compatible class with optimum gptq config dict
"""
if "disable_exllama" in config_dict:
config_dict["use_exllama"] = not config_dict["disable_exllama"]
# switch to None to not trigger the warning
config_dict.pop("disable_exllama")
config = cls(**config_dict)
return config
|
Get compatible class with optimum gptq config dict
|
from_dict_optimum
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def post_init(self):
r"""
Safety checker that arguments are correct
"""
if self.backend not in [AwqBackendPackingMethod.AUTOAWQ, AwqBackendPackingMethod.LLMAWQ]:
raise ValueError(
f"Only supported quantization backends in {AwqBackendPackingMethod.AUTOAWQ} and {AwqBackendPackingMethod.LLMAWQ} - not recognized backend {self.backend}"
)
self.version = AWQLinearVersion.from_str(self.version)
if self.version not in [
AWQLinearVersion.GEMM,
AWQLinearVersion.GEMV,
AWQLinearVersion.EXLLAMA,
AWQLinearVersion.IPEX,
]:
raise ValueError(
f"Only supported versions are in [AWQLinearVersion.GEMM, AWQLinearVersion.GEMV, AWQLinearVersion.EXLLAMA, AWQLinearVersion.IPEX] - not recognized version {self.version}"
)
if self.backend == AwqBackendPackingMethod.LLMAWQ:
# Only cuda device can run this function
if not (torch.cuda.is_available() or torch.xpu.is_available()):
raise ValueError("LLM-AWQ backend is only supported on CUDA and XPU")
if torch.cuda.is_available():
compute_capability = torch.cuda.get_device_capability()
major, minor = compute_capability
if major < 8:
raise ValueError("LLM-AWQ backend is only supported on CUDA GPUs with compute capability >= 8.0")
if self.do_fuse and self.fuse_max_seq_len is None:
raise ValueError(
"You cannot enable fused modules without specifying a `fuse_max_seq_len`, make sure to pass a valid `fuse_max_seq_len` for your usecase"
)
if self.do_fuse:
awq_version_supports_fusing = False
MIN_AWQ_VERSION = "0.1.7"
if is_auto_awq_available():
awq_version_supports_fusing = version.parse(importlib.metadata.version("autoawq")) >= version.parse(
MIN_AWQ_VERSION
)
if not awq_version_supports_fusing:
raise ValueError(
f"You current version of `autoawq` does not support module fusing, please upgrade `autoawq` package to at least {MIN_AWQ_VERSION}."
)
if self.modules_to_not_convert is not None:
awq_version_supports_non_conversion = False
MIN_AWQ_VERSION = "0.1.8"
if is_auto_awq_available():
awq_version_supports_non_conversion = version.parse(
importlib.metadata.version("autoawq")
) >= version.parse(MIN_AWQ_VERSION)
if not awq_version_supports_non_conversion:
raise ValueError(
f"You current version of `autoawq` does not support module quantization skipping, please upgrade `autoawq` package to at least {MIN_AWQ_VERSION}."
)
if self.do_fuse and self.modules_to_fuse is not None:
required_keys = [
"hidden_size",
"num_attention_heads",
"num_key_value_heads",
"mlp",
"attention",
"layernorm",
"use_alibi",
]
if not all(key in self.modules_to_fuse for key in required_keys):
raise ValueError(
f"Required fields are missing in the fusing mapping, required fields are {required_keys}"
)
if self.version == AWQLinearVersion.EXLLAMA:
awq_version_supports_exllama = False
MIN_AWQ_VERSION = "0.2.0"
if is_auto_awq_available():
awq_version_supports_exllama = version.parse(importlib.metadata.version("autoawq")) >= version.parse(
MIN_AWQ_VERSION
)
if not awq_version_supports_exllama:
raise ValueError(
f"You current version of `autoawq` does not support exllama backend, "
f"please upgrade `autoawq` package to at least {MIN_AWQ_VERSION}."
)
if self.exllama_config is None:
self.exllama_config = {"version": ExllamaVersion.TWO, "max_input_len": 2048, "max_batch_size": 8}
else:
if "version" not in self.exllama_config:
raise ValueError("`exllama_config` needs to have a `version` key.")
elif self.exllama_config["version"] not in [ExllamaVersion.ONE, ExllamaVersion.TWO]:
exllama_version = self.exllama_config["version"]
raise ValueError(
f"Only supported versions are in [ExllamaVersion.ONE, ExllamaVersion.TWO] - not recognized version {exllama_version}"
)
|
Safety checker that arguments are correct
|
post_init
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def post_init(self):
r"""
Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
"""
        if not isinstance(self.in_group_size, int):
            raise TypeError("in_group_size must be an int")
        if not isinstance(self.out_group_size, int):
            raise TypeError("out_group_size must be an int")
        if not isinstance(self.num_codebooks, int):
            raise TypeError("num_codebooks must be an int")
        if not isinstance(self.nbits_per_codebook, int):
            raise TypeError("nbits_per_codebook must be an int")
if self.linear_weights_not_to_quantize is not None and not isinstance(
self.linear_weights_not_to_quantize, list
):
raise ValueError("linear_weights_not_to_quantize must be a list of strings")
if self.linear_weights_not_to_quantize is None:
self.linear_weights_not_to_quantize = []
|
Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
|
post_init
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def post_init(self):
r"""
Safety checker that arguments are correct
"""
if self.is_indice_packed is False:
raise ValueError("is_indice_packed should always be True")
|
Safety checker that arguments are correct
|
post_init
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def post_init(self):
r"""
Safety checker that arguments are correct
"""
for layer_name, layer_param in self.config_for_layers.items():
VptqLayerConfig(**layer_param)
if self.enable_proxy_error is True:
raise ValueError("enable_proxy_error should always be False until we support training")
|
Safety checker that arguments are correct
|
post_init
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def post_init(self):
r"""
Safety checker that arguments are correct
"""
accepted_weights = ["float8", "int8", "int4", "int2"]
accepted_activations = [None, "int8", "float8"]
if self.weights not in accepted_weights:
raise ValueError(f"Only support weights in {accepted_weights} but found {self.weights}")
if self.activations not in accepted_activations:
raise ValueError(f"Only support weights in {accepted_activations} but found {self.activations}")
|
Safety checker that arguments are correct
|
post_init
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def post_init(self):
r"""
Safety checker that arguments are correct
"""
accepted_weights = ["int8"]
if self.weights not in accepted_weights:
raise ValueError(f"Only support weights in {accepted_weights} but found {self.weights}")
|
Safety checker that arguments are correct
|
post_init
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs):
"""
Instantiates a [`CompressedTensorsConfig`] from a Python dictionary of parameters.
Optionally unwraps any args from the nested quantization_config
Args:
config_dict (`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object.
return_unused_kwargs (`bool`,*optional*, defaults to `False`):
Whether or not to return a list of unused keyword arguments. Used for `from_pretrained` method in
`PreTrainedModel`.
kwargs (`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
[`QuantizationConfigMixin`]: The configuration object instantiated from those parameters.
"""
if "quantization_config" in config_dict:
config_dict = dict(
sparsity_config=config_dict.get("sparsity_config"),
**config_dict["quantization_config"],
)
return super().from_dict(config_dict, return_unused_kwargs=return_unused_kwargs, **kwargs)
|
Instantiates a [`CompressedTensorsConfig`] from a Python dictionary of parameters.
Optionally unwraps any args from the nested quantization_config
Args:
config_dict (`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object.
return_unused_kwargs (`bool`,*optional*, defaults to `False`):
Whether or not to return a list of unused keyword arguments. Used for `from_pretrained` method in
`PreTrainedModel`.
kwargs (`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
[`QuantizationConfigMixin`]: The configuration object instantiated from those parameters.
|
from_dict
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
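The only non-trivial step above is flattening a nested `quantization_config` before delegating to the parent `from_dict`. A small sketch of that unwrapping on a hypothetical serialized dict (the field values are made up for illustration):
```python
# Hypothetical dict as it might appear in a saved config.json.
config_dict = {
    "quantization_config": {"quant_method": "compressed-tensors", "format": "pack-quantized"},
    "sparsity_config": {"format": "sparse-24"},
}

if "quantization_config" in config_dict:
    config_dict = dict(
        sparsity_config=config_dict.get("sparsity_config"),
        **config_dict["quantization_config"],
    )

# The nested keys are now top-level, ready for the parent class's from_dict().
print(config_dict)
# {'sparsity_config': {'format': 'sparse-24'}, 'quant_method': 'compressed-tensors', 'format': 'pack-quantized'}
```
|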
def to_dict(self) -> Dict[str, Any]:
"""
Quantization config to be added to config.json
Serializes this instance to a Python dictionary. Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
quantization_config = {}
if self.quantization_config is not None:
quantization_config = self.quantization_config.model_dump()
else:
quantization_config["quant_method"] = QuantizationMethod.COMPRESSED_TENSORS
if self.sparsity_config is not None:
quantization_config["sparsity_config"] = self.sparsity_config.model_dump()
else:
quantization_config["sparsity_config"] = {}
return quantization_config
|
Quantization config to be added to config.json
Serializes this instance to a Python dictionary. Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
|
to_dict
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def to_diff_dict(self) -> Dict[str, Any]:
"""
Removes all attributes from config which correspond to the default config attributes for better readability and
serializes to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,
"""
config_dict = self.to_dict()
# get the default config dict
default_config_dict = CompressedTensorsConfig().to_dict()
serializable_config_dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if key not in default_config_dict or value != default_config_dict[key]:
serializable_config_dict[key] = value
return serializable_config_dict
|
Removes all attributes from config which correspond to the default config attributes for better readability and
serializes to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,
|
to_diff_dict
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
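The diffing logic is generic: serialize the instance, build a default instance's dict, and keep only keys whose values differ. The same pattern with plain dicts (values are illustrative):
```python
def diff_against_default(config_dict, default_config_dict):
    # Keep only keys that are new or whose values differ from the defaults.
    return {
        key: value
        for key, value in config_dict.items()
        if key not in default_config_dict or value != default_config_dict[key]
    }

default = {"quant_method": "compressed-tensors", "sparsity_config": {}}
current = {"quant_method": "compressed-tensors", "sparsity_config": {"format": "sparse-24"}}
print(diff_against_default(current, default))  # {'sparsity_config': {'format': 'sparse-24'}}
```
|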
def post_init(self):
r"""
Safety checker that arguments are correct.
"""
if self.bits not in [2, 3, 4]:
raise ValueError("bits must be 2, 3, or 4")
if self.p not in [1, 2]:
raise ValueError("p must be 1 or 2. 2 is always better in practice")
if self.group_size not in [64, 128, 256]:
raise ValueError("group_size must be 64, 128, or 256")
if self.hadamard_size % self.group_size != 0:
raise ValueError("hadamard_size must be divisible by group_size")
|
Safety checker that arguments are correct.
|
post_init
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def _get_ao_version() -> version.Version:
"""Centralized check for TorchAO availability and version requirements."""
if not is_torchao_available():
raise ValueError("TorchAoConfig requires torchao to be installed. Install with `pip install torchao`")
return version.parse(importlib.metadata.version("torchao"))
|
Centralized check for TorchAO availability and version requirements.
|
_get_ao_version
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def _get_torchao_quant_type_to_method(self):
"""Get mapping of quant_type strings to their corresponding methods."""
from torchao.quantization import (
autoquant,
int4_weight_only,
int8_dynamic_activation_int8_weight,
int8_weight_only,
)
return {
"int4_weight_only": int4_weight_only,
"int8_weight_only": int8_weight_only,
"int8_dynamic_activation_int8_weight": int8_dynamic_activation_int8_weight,
"autoquant": autoquant,
}
|
Get mapping of quant_type strings to their corresponding methods.
|
_get_torchao_quant_type_to_method
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def get_apply_tensor_subclass(self):
"""Create the appropriate quantization method based on configuration."""
if isinstance(self.quant_type, str):
methods = self._get_torchao_quant_type_to_method()
quant_type_kwargs = self.quant_type_kwargs.copy()
if (
not torch.cuda.is_available()
and is_torchao_available()
and self.quant_type == "int4_weight_only"
and version.parse(importlib.metadata.version("torchao")) >= version.parse("0.8.0")
and quant_type_kwargs.get("layout", None) is None
):
if torch.xpu.is_available():
if version.parse(importlib.metadata.version("torchao")) >= version.parse(
"0.11.0"
) and version.parse(importlib.metadata.version("torch")) > version.parse("2.7.9"):
from torchao.dtypes import Int4XPULayout
from torchao.quantization.quant_primitives import ZeroPointDomain
quant_type_kwargs["layout"] = Int4XPULayout()
quant_type_kwargs["zero_point_domain"] = ZeroPointDomain.INT
else:
raise ValueError(
"TorchAoConfig requires torchao >= 0.11.0 and torch >= 2.8.0 for XPU support. Please upgrade the version or use run on CPU with the cpu version pytorch."
)
else:
from torchao.dtypes import Int4CPULayout
quant_type_kwargs["layout"] = Int4CPULayout()
return methods[self.quant_type](**quant_type_kwargs)
else:
return self.quant_type
|
Create the appropriate quantization method based on configuration.
|
get_apply_tensor_subclass
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
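For string quant types, the method looks up the torchao constructor and forwards `quant_type_kwargs`, injecting a CPU/XPU layout when no CUDA device is present. A hedged usage sketch; the `group_size` kwarg is forwarded to torchao's `int4_weight_only` and is an assumption here, not something shown in the snippet above:
```python
from transformers import TorchAoConfig

# Requires torchao to be installed; "int4_weight_only" is one of the string
# quant types mapped in _get_torchao_quant_type_to_method().
quant_config = TorchAoConfig("int4_weight_only", group_size=128)

# Resolves to int4_weight_only(group_size=128), adding an Int4CPULayout /
# Int4XPULayout when no CUDA device is available (per the branch above).
apply_fn = quant_config.get_apply_tensor_subclass()
```
|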
def post_init(self):
r"""
Safety checker that arguments are correct.
"""
if not isinstance(self.bits, int):
raise TypeError("bits must be an int")
if not isinstance(self.beta1, int):
raise TypeError("beta1 must be an int")
if not isinstance(self.beta2, int):
raise TypeError("beta2 must be an int")
if self.bits != 3:
raise ValueError("SpQR currently only supports bits = 3")
if self.beta1 != 16:
raise ValueError("SpQR currently only supports beta1 = 16")
if self.beta2 != 16:
raise ValueError("SpQR currently only supports beta2 = 16")
if not isinstance(self.shapes, dict):
raise TypeError("shapes must be a dict")
|
Safety checker that arguments are correct.
|
post_init
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
def post_init(self):
r"""
Safety checker that arguments are correct
"""
self.activation_scheme = self.activation_scheme.lower()
if self.activation_scheme not in ["dynamic"]:
raise ValueError(f"Activation scheme {self.activation_scheme} not supported")
if len(self.weight_block_size) != 2:
raise ValueError("weight_block_size must be a tuple of two integers")
if self.weight_block_size[0] <= 0 or self.weight_block_size[1] <= 0:
raise ValueError("weight_block_size must be a tuple of two positive integers")
|
Safety checker that arguments are correct
|
post_init
|
python
|
huggingface/transformers
|
src/transformers/utils/quantization_config.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/quantization_config.py
|
Apache-2.0
|
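The checks above constrain the config to dynamic activation scaling and a 2-tuple of positive block dimensions. A tiny standalone restatement of the same validation (the helper name is this sketch's own):
```python
def validate_fp8_args(activation_scheme: str, weight_block_size: tuple) -> None:
    # Mirrors the constraints enforced in the post_init above.
    activation_scheme = activation_scheme.lower()
    if activation_scheme not in ["dynamic"]:
        raise ValueError(f"Activation scheme {activation_scheme} not supported")
    if len(weight_block_size) != 2 or any(dim <= 0 for dim in weight_block_size):
        raise ValueError("weight_block_size must be a tuple of two positive integers")

validate_fp8_args("dynamic", (128, 128))  # passes silently
```
|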
def require_version(requirement: str, hint: Optional[str] = None) -> None:
"""
Perform a runtime check of the dependency versions, using the exact same syntax used by pip.
The installed module version comes from the *site-packages* dir via *importlib.metadata*.
Args:
requirement (`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy"
hint (`str`, *optional*): what suggestion to print in case of requirements not being met
Example:
```python
require_version("pandas>1.1.2")
require_version("numpy>1.18.5", "this is important to have for whatever reason")
```"""
hint = f"\n{hint}" if hint is not None else ""
# non-versioned check
if re.match(r"^[\w_\-\d]+$", requirement):
pkg, op, want_ver = requirement, None, None
else:
match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
f" got {requirement}"
)
pkg, want_full = match[0]
want_range = want_full.split(",") # there could be multiple requirements
wanted = {}
for w in want_range:
match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
f" but got {requirement}"
)
op, want_ver = match[0]
wanted[op] = want_ver
if op not in ops:
raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")
# special case
if pkg == "python":
got_ver = ".".join([str(x) for x in sys.version_info[:3]])
for op, want_ver in wanted.items():
_compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
return
# check if any version is installed
try:
got_ver = importlib.metadata.version(pkg)
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"The '{requirement}' distribution was not found and is required by this application. {hint}"
)
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
|
Perform a runtime check of the dependency versions, using the exact same syntax used by pip.
The installed module version comes from the *site-packages* dir via *importlib.metadata*.
Args:
requirement (`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy"
hint (`str`, *optional*): what suggestion to print in case of requirements not being met
Example:
```python
require_version("pandas>1.1.2")
require_version("numpy>1.18.5", "this is important to have for whatever reason")
```
|
require_version
|
python
|
huggingface/transformers
|
src/transformers/utils/versions.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/versions.py
|
Apache-2.0
|
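Beyond the single-constraint form shown in the docstring, the parser splits on commas, so bounded ranges work as well. A few hedged calls (package names and bounds are illustrative):
```python
from transformers.utils.versions import require_version

# Single lower bound, with a hint printed if the check fails.
require_version("numpy>=1.21", "needed for the array utilities in this script")

# Comma-separated constraints are parsed into multiple (op, version) pairs.
require_version("tokenizers>=0.13,<1.0")

# Bare package name: only checks that some version is installed.
require_version("tqdm")
```
|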
def get_available_devices() -> frozenset[str]:
"""
Returns a frozenset of devices available for the current PyTorch installation.
"""
devices = {"cpu"} # `cpu` is always supported as a device in PyTorch
if is_torch_cuda_available():
devices.add("cuda")
if is_torch_mps_available():
devices.add("mps")
if is_torch_xpu_available():
devices.add("xpu")
if is_torch_npu_available():
devices.add("npu")
if is_torch_hpu_available():
devices.add("hpu")
if is_torch_mlu_available():
devices.add("mlu")
if is_torch_musa_available():
devices.add("musa")
return frozenset(devices)
|
Returns a frozenset of devices available for the current PyTorch installation.
|
get_available_devices
|
python
|
huggingface/transformers
|
src/transformers/utils/__init__.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/__init__.py
|
Apache-2.0
|
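A hedged usage sketch: pick an accelerator from the returned frozenset, falling back to CPU. The preference order below is this example's choice, not something the function defines:
```python
from transformers.utils import get_available_devices

devices = get_available_devices()

device = "cpu"  # always present, used as the fallback
for candidate in ("cuda", "xpu", "npu", "mps"):
    if candidate in devices:
        device = candidate
        break

print(f"available: {sorted(devices)}, selected: {device}")
```
|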
def create_and_test_config_from_and_save_pretrained_composite(self):
"""
Tests that composite or nested configs can be loaded and saved correctly. In case the config
has a sub-config, we should be able to call `sub_config.from_pretrained('general_config_file')`
and get a result same as if we loaded the whole config and obtained `config.sub_config` from it.
"""
config = self.config_class(**self.inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
config.save_pretrained(tmpdirname)
general_config_loaded = self.config_class.from_pretrained(tmpdirname)
general_config_dict = config.to_dict()
# Iterate over all sub_configs if there are any and load them with their own classes
sub_configs = general_config_loaded.sub_configs
for sub_config_key, sub_class in sub_configs.items():
if sub_class.__name__ == "AutoConfig":
sub_class = sub_class.for_model(**general_config_dict[sub_config_key]).__class__
sub_config_loaded = sub_class.from_pretrained(tmpdirname)
else:
sub_config_loaded = sub_class.from_pretrained(tmpdirname)
# Pop `transformers_version`, it never exists when a config is part of a general composite config
# Verify that loading with subconfig class results in same dict as if we loaded with general composite config class
sub_config_loaded_dict = sub_config_loaded.to_dict()
sub_config_loaded_dict.pop("transformers_version", None)
self.parent.assertEqual(sub_config_loaded_dict, general_config_dict[sub_config_key])
# Verify that the loaded config type is same as in the general config
type_from_general_config = type(getattr(general_config_loaded, sub_config_key))
self.parent.assertTrue(isinstance(sub_config_loaded, type_from_general_config))
# Now save only the sub-config and load it back to make sure the whole load-save-load pipeline works
with tempfile.TemporaryDirectory() as tmpdirname2:
sub_config_loaded.save_pretrained(tmpdirname2)
sub_config_loaded_2 = sub_class.from_pretrained(tmpdirname2)
self.parent.assertEqual(sub_config_loaded.to_dict(), sub_config_loaded_2.to_dict())
|
Tests that composite or nested configs can be loaded and saved correctly. In case the config
has a sub-config, we should be able to call `sub_config.from_pretrained('general_config_file')`
and get a result same as if we loaded the whole config and obtained `config.sub_config` from it.
|
create_and_test_config_from_and_save_pretrained_composite
|
python
|
huggingface/transformers
|
tests/test_configuration_common.py
|
https://github.com/huggingface/transformers/blob/master/tests/test_configuration_common.py
|
Apache-2.0
|
def prepare_image_inputs(
batch_size,
min_resolution,
max_resolution,
num_channels,
size_divisor=None,
equal_resolution=False,
numpify=False,
torchify=False,
):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
One can specify whether the images are of the same resolution or not.
"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
image_inputs = []
for i in range(batch_size):
if equal_resolution:
width = height = max_resolution
else:
# To avoid getting image width/height 0
if size_divisor is not None:
# If `size_divisor` is defined, the image needs to have width/size >= `size_divisor`
min_resolution = max(size_divisor, min_resolution)
width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2)
image_inputs.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8))
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
image_inputs = [Image.fromarray(np.moveaxis(image, 0, -1)) for image in image_inputs]
if torchify:
image_inputs = [torch.from_numpy(image) for image in image_inputs]
if numpify:
# Numpy images are typically in channels last format
image_inputs = [image.transpose(1, 2, 0) for image in image_inputs]
return image_inputs
|
This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
One can specify whether the images are of the same resolution or not.
|
prepare_image_inputs
|
python
|
huggingface/transformers
|
tests/test_image_processing_common.py
|
https://github.com/huggingface/transformers/blob/master/tests/test_image_processing_common.py
|
Apache-2.0
|
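A sketch of how the helper is typically driven from an image-processor tester. The numbers are illustrative, and the import assumes it is run from a transformers checkout where the test module is importable:
```python
from tests.test_image_processing_common import prepare_image_inputs

# Eight RGB PIL images with per-image resolutions between 30 and 400 pixels.
pil_images = prepare_image_inputs(
    batch_size=8, min_resolution=30, max_resolution=400, num_channels=3
)

# The same kind of batch as channels-last NumPy arrays instead.
np_images = prepare_image_inputs(
    batch_size=8, min_resolution=30, max_resolution=400, num_channels=3, numpify=True
)
print(type(pil_images[0]), np_images[0].shape)
```
|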
def prepare_video(num_frames, num_channels, width=10, height=10, numpify=False, torchify=False):
"""This function prepares a video as a list of PIL images/NumPy arrays/PyTorch tensors."""
video = []
for i in range(num_frames):
video.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8))
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
video = [Image.fromarray(np.moveaxis(frame, 0, -1)) for frame in video]
if torchify:
video = [torch.from_numpy(frame) for frame in video]
return video
|
This function prepares a video as a list of PIL images/NumPy arrays/PyTorch tensors.
|
prepare_video
|
python
|
huggingface/transformers
|
tests/test_image_processing_common.py
|
https://github.com/huggingface/transformers/blob/master/tests/test_image_processing_common.py
|
Apache-2.0
|
def prepare_video_inputs(
batch_size,
num_frames,
num_channels,
min_resolution,
max_resolution,
equal_resolution=False,
numpify=False,
torchify=False,
):
"""This function prepares a batch of videos: a list of list of PIL images, or a list of list of numpy arrays if
one specifies numpify=True, or a list of list of PyTorch tensors if one specifies torchify=True.
One can specify whether the videos are of the same resolution or not.
"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
video_inputs = []
for _ in range(batch_size):
if equal_resolution:
width = height = max_resolution
else:
width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2)
video = prepare_video(
num_frames=num_frames,
num_channels=num_channels,
width=width,
height=height,
numpify=numpify,
torchify=torchify,
)
video_inputs.append(video)
return video_inputs
|
This function prepares a batch of videos: a list of list of PIL images, or a list of list of numpy arrays if
one specifies numpify=True, or a list of list of PyTorch tensors if one specifies torchify=True.
One can specify whether the videos are of the same resolution or not.
|
prepare_video_inputs
|
python
|
huggingface/transformers
|
tests/test_image_processing_common.py
|
https://github.com/huggingface/transformers/blob/master/tests/test_image_processing_common.py
|
Apache-2.0
|
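The batch helper simply calls `prepare_video` once per sample, so output shapes follow directly from its arguments. A hedged sketch under the same importability assumption as above:
```python
from tests.test_image_processing_common import prepare_video_inputs

# Four videos of eight RGB frames each, returned as lists of PyTorch tensors.
videos = prepare_video_inputs(
    batch_size=4,
    num_frames=8,
    num_channels=3,
    min_resolution=30,
    max_resolution=80,
    torchify=True,
)
print(len(videos), len(videos[0]), videos[0][0].shape)  # 4 8 torch.Size([3, W, H])
```
|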
def test_save_load_fast_slow(self):
"Test that we can load a fast image processor from a slow one and vice-versa."
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest("Skipping slow/fast save/load test as one of the image processors is not defined")
image_processor_dict = self.image_processor_tester.prepare_image_processor_dict()
image_processor_slow_0 = self.image_processing_class(**image_processor_dict)
# Load fast image processor from slow one
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_slow_0.save_pretrained(tmpdirname)
image_processor_fast_0 = self.fast_image_processing_class.from_pretrained(tmpdirname)
image_processor_fast_1 = self.fast_image_processing_class(**image_processor_dict)
# Load slow image processor from fast one
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_fast_1.save_pretrained(tmpdirname)
image_processor_slow_1 = self.image_processing_class.from_pretrained(tmpdirname)
dict_slow_0 = image_processor_slow_0.to_dict()
dict_slow_1 = image_processor_slow_1.to_dict()
difference = {
key: dict_slow_0.get(key) if key in dict_slow_0 else dict_slow_1.get(key)
for key in set(dict_slow_0) ^ set(dict_slow_1)
}
dict_slow_0 = {key: dict_slow_0[key] for key in set(dict_slow_0) & set(dict_slow_1)}
dict_slow_1 = {key: dict_slow_1[key] for key in set(dict_slow_0) & set(dict_slow_1)}
# check that all additional keys are None, except for `default_to_square` and `data_format` which are only set in fast processors
self.assertTrue(
all(value is None for key, value in difference.items() if key not in ["default_to_square", "data_format"])
)
# check that the remaining keys are the same
self.assertEqual(dict_slow_0, dict_slow_1)
dict_fast_0 = image_processor_fast_0.to_dict()
dict_fast_1 = image_processor_fast_1.to_dict()
difference = {
key: dict_fast_0.get(key) if key in dict_fast_0 else dict_fast_1.get(key)
for key in set(dict_fast_0) ^ set(dict_fast_1)
}
dict_fast_0 = {key: dict_fast_0[key] for key in set(dict_fast_0) & set(dict_fast_1)}
dict_fast_1 = {key: dict_fast_1[key] for key in set(dict_fast_0) & set(dict_fast_1)}
# check that all additional keys are None, except for `default_to_square` and `data_format` which are only set in fast processors
self.assertTrue(
all(value is None for key, value in difference.items() if key not in ["default_to_square", "data_format"])
)
# check that the remaining keys are the same
self.assertEqual(dict_fast_0, dict_fast_1)
|
Test that we can load a fast image processor from a slow one and vice-versa.
|
test_save_load_fast_slow
|
python
|
huggingface/transformers
|
tests/test_image_processing_common.py
|
https://github.com/huggingface/transformers/blob/master/tests/test_image_processing_common.py
|
Apache-2.0
|
def test_save_load_fast_slow_auto(self):
"Test that we can load a fast image processor from a slow one and vice-versa using AutoImageProcessor."
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest("Skipping slow/fast save/load test as one of the image processors is not defined")
image_processor_dict = self.image_processor_tester.prepare_image_processor_dict()
image_processor_slow_0 = self.image_processing_class(**image_processor_dict)
# Load fast image processor from slow one
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_slow_0.save_pretrained(tmpdirname)
image_processor_fast_0 = AutoImageProcessor.from_pretrained(tmpdirname, use_fast=True)
image_processor_fast_1 = self.fast_image_processing_class(**image_processor_dict)
# Load slow image processor from fast one
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_fast_1.save_pretrained(tmpdirname)
image_processor_slow_1 = AutoImageProcessor.from_pretrained(tmpdirname, use_fast=False)
dict_slow_0 = image_processor_slow_0.to_dict()
dict_slow_1 = image_processor_slow_1.to_dict()
difference = {
key: dict_slow_0.get(key) if key in dict_slow_0 else dict_slow_1.get(key)
for key in set(dict_slow_0) ^ set(dict_slow_1)
}
dict_slow_0 = {key: dict_slow_0[key] for key in set(dict_slow_0) & set(dict_slow_1)}
dict_slow_1 = {key: dict_slow_1[key] for key in set(dict_slow_0) & set(dict_slow_1)}
# check that all additional keys are None, except for `default_to_square` and `data_format` which are only set in fast processors
self.assertTrue(
all(value is None for key, value in difference.items() if key not in ["default_to_square", "data_format"])
)
# check that the remaining keys are the same
self.assertEqual(dict_slow_0, dict_slow_1)
dict_fast_0 = image_processor_fast_0.to_dict()
dict_fast_1 = image_processor_fast_1.to_dict()
difference = {
key: dict_fast_0.get(key) if key in dict_fast_0 else dict_fast_1.get(key)
for key in set(dict_fast_0) ^ set(dict_fast_1)
}
dict_fast_0 = {key: dict_fast_0[key] for key in set(dict_fast_0) & set(dict_fast_1)}
dict_fast_1 = {key: dict_fast_1[key] for key in set(dict_fast_0) & set(dict_fast_1)}
# check that all additional keys are None, except for `default_to_square` and `data_format` which are only set in fast processors
self.assertTrue(
all(value is None for key, value in difference.items() if key not in ["default_to_square", "data_format"])
)
# check that the remaining keys are the same
self.assertEqual(dict_fast_0, dict_fast_1)
|
Test that we can load a fast image processor from a slow one and vice-versa using AutoImageProcessor.
|
test_save_load_fast_slow_auto
|
python
|
huggingface/transformers
|
tests/test_image_processing_common.py
|
https://github.com/huggingface/transformers/blob/master/tests/test_image_processing_common.py
|
Apache-2.0
|
def test_batching_equivalence(self, atol=1e-5, rtol=1e-5):
"""
Tests that the model supports batching and that the output is nearly the same for the same input in
different batch sizes.
(Why "nearly the same" not "exactly the same"? Batching uses different matmul shapes, which often leads to
different results: https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535)
"""
def recursive_check(batched_object, single_row_object, model_name, key):
if isinstance(batched_object, (list, tuple)):
for batched_object_value, single_row_object_value in zip(batched_object, single_row_object):
recursive_check(batched_object_value, single_row_object_value, model_name, key)
elif isinstance(batched_object, dict):
for batched_object_value, single_row_object_value in zip(
batched_object.values(), single_row_object.values()
):
recursive_check(batched_object_value, single_row_object_value, model_name, key)
# do not compare returned loss (0-dim tensor) / codebook ids (int) / caching objects
elif batched_object is None or not isinstance(batched_object, torch.Tensor):
return
elif batched_object.dim() == 0:
return
# do not compare int or bool outputs as they are mostly computed with max/argmax/topk methods which are
# very sensitive to the inputs (e.g. tiny differences may give totally different results)
elif not torch.is_floating_point(batched_object):
return
else:
# indexing the first element does not always work
# e.g. models that output similarity scores of size (N, M) would need to index [0, 0]
slice_ids = [slice(0, index) for index in single_row_object.shape]
batched_row = batched_object[slice_ids]
self.assertFalse(
torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}"
)
self.assertFalse(
torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}"
)
self.assertFalse(
torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}"
)
self.assertFalse(
torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}"
)
try:
torch.testing.assert_close(batched_row, single_row_object, atol=atol, rtol=rtol)
except AssertionError as e:
msg = f"Batched and Single row outputs are not equal in {model_name} for key={key}.\n\n"
msg += str(e)
raise AssertionError(msg)
set_model_tester_for_less_flaky_test(self)
config, batched_input = self.model_tester.prepare_config_and_inputs_for_common()
set_config_for_less_flaky_test(config)
for model_class in self.all_model_classes:
config.output_hidden_states = True
model_name = model_class.__name__
if hasattr(self.model_tester, "prepare_config_and_inputs_for_model_class"):
config, batched_input = self.model_tester.prepare_config_and_inputs_for_model_class(model_class)
batched_input_prepared = self._prepare_for_class(batched_input, model_class)
model = model_class(config).to(torch_device).eval()
set_model_for_less_flaky_test(model)
batch_size = self.model_tester.batch_size
single_row_input = {}
for key, value in batched_input_prepared.items():
if isinstance(value, torch.Tensor) and value.shape[0] % batch_size == 0:
# e.g. musicgen has inputs of size (bs*codebooks). in most cases value.shape[0] == batch_size
single_batch_shape = value.shape[0] // batch_size
single_row_input[key] = value[:single_batch_shape]
else:
single_row_input[key] = value
with torch.no_grad():
model_batched_output = model(**batched_input_prepared)
model_row_output = model(**single_row_input)
if isinstance(model_batched_output, torch.Tensor):
model_batched_output = {"model_output": model_batched_output}
model_row_output = {"model_output": model_row_output}
for key in model_batched_output:
# DETR starts from zero-init queries to decoder, leading to cos_similarity = `nan`
if hasattr(self, "zero_init_hidden_state") and "decoder_hidden_states" in key:
model_batched_output[key] = model_batched_output[key][1:]
model_row_output[key] = model_row_output[key][1:]
recursive_check(model_batched_output[key], model_row_output[key], model_name, key)
|
Tests that the model supports batching and that the output is nearly the same for the same input in
different batch sizes.
(Why "nearly the same" not "exactly the same"? Batching uses different matmul shapes, which often leads to
different results: https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535)
|
test_batching_equivalence
|
python
|
huggingface/transformers
|
tests/test_modeling_common.py
|
https://github.com/huggingface/transformers/blob/master/tests/test_modeling_common.py
|
Apache-2.0
|
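The core idea is easy to reproduce in isolation: run the same rows through a model once as a batch and once as a single row, then compare the leading slice of the batched output within a tolerance. A toy standalone sketch, not the test harness above:
```python
import torch
from torch import nn

torch.manual_seed(0)
model = nn.Linear(16, 4).eval()

batched_input = torch.randn(8, 16)
single_row_input = batched_input[:1]  # same first row, batch size 1

with torch.no_grad():
    batched_out = model(batched_input)
    single_out = model(single_row_input)

# Batched matmuls can differ slightly from single-row ones, hence the tolerance.
torch.testing.assert_close(batched_out[:1], single_out, atol=1e-5, rtol=1e-5)
```
|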
def _make_attention_mask_non_null(self, inputs_dict):
"""Make sure no sequence has all zeros as attention mask"""
for k in ["attention_mask", "encoder_attention_mask", "decoder_attention_mask"]:
if k in inputs_dict:
attention_mask = inputs_dict[k]
# Make sure no all 0s attention masks - to avoid failure at this moment.
# Put `1` at the beginning of sequences to make it still work when combining causal attention masks.
# TODO: remove this line once a fix regarding large negative values for attention mask is done.
attention_mask = torch.cat(
[torch.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], dim=-1
)
# Here we make the first sequence with all 0s as attention mask.
# Currently, this will fail for `TFWav2Vec2Model`. This is caused by the different large negative
# values, like `1e-4`, `1e-9`, `1e-30` and `-inf` for attention mask across models/frameworks.
# TODO: enable this block once the large negative values thing is cleaned up.
# (see https://github.com/huggingface/transformers/issues/14859)
# attention_mask = torch.cat(
# [torch.zeros_like(attention_mask[:1], dtype=attention_mask.dtype), attention_mask[1:]],
# dim=0
# )
inputs_dict[k] = attention_mask
|
Make sure no sequence has all zeros as attention mask
|
_make_attention_mask_non_null
|
python
|
huggingface/transformers
|
tests/test_modeling_common.py
|
https://github.com/huggingface/transformers/blob/master/tests/test_modeling_common.py
|
Apache-2.0
|
def _postprocessing_to_ignore_test_cases(self, tf_outputs, pt_outputs, model_class):
"""For temporarily ignoring some failed test cases (issues to be fixed)"""
tf_keys = {k for k, v in tf_outputs.items() if v is not None}
pt_keys = {k for k, v in pt_outputs.items() if v is not None}
key_differences = tf_keys.symmetric_difference(pt_keys)
if model_class.__name__ in [
"FlaubertWithLMHeadModel",
"FunnelForPreTraining",
"ElectraForPreTraining",
"XLMWithLMHeadModel",
]:
for k in key_differences:
if k in ["loss", "losses"]:
tf_keys.discard(k)
pt_keys.discard(k)
elif model_class.__name__.startswith("GPT2"):
# `TFGPT2` has `past_key_values` as a tensor while `GPT2` has it as a tuple.
tf_keys.discard("past_key_values")
pt_keys.discard("past_key_values")
# create new outputs from the remaining fields
new_tf_outputs = type(tf_outputs)(**{k: tf_outputs[k] for k in tf_keys})
new_pt_outputs = type(pt_outputs)(**{k: pt_outputs[k] for k in pt_keys})
return new_tf_outputs, new_pt_outputs
|
For temporarily ignoring some failed test cases (issues to be fixed)
|
_postprocessing_to_ignore_test_cases
|
python
|
huggingface/transformers
|
tests/test_modeling_common.py
|
https://github.com/huggingface/transformers/blob/master/tests/test_modeling_common.py
|
Apache-2.0
|