code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def find_custom_args_with_details(file_content: str, custom_args_var_name: str) -> list[dict]:
    """
    Find the given custom args variable in the file content and return its content.
    Args:
        file_content: The string content of the Python file.
        custom_args_var_name: The name of the custom args variable.
    """
    # Escape the variable_name to handle any special regex characters it might contain
    escaped_variable_name = re.escape(custom_args_var_name)
    # Construct the regex pattern dynamically with the specific variable name
    # This regex looks for:
    # ^\s* : Start of a line with optional leading whitespace.
    # ({escaped_variable_name}) : Capture the exact variable name.
    # \s*=\s* : An equals sign, surrounded by optional whitespace.
    # (r?\"\"\") : Capture the opening triple quotes (raw or normal string).
    # (.*?) : Capture the content (non-greedy).
    # (\"\"\") : Match the closing triple quotes.
    regex_pattern = rf"^\s*({escaped_variable_name})\s*=\s*(r?\"\"\")(.*?)(\"\"\")"
    flags = re.MULTILINE | re.DOTALL
    # Use re.search to find the first match
    match = re.search(regex_pattern, file_content, flags)
    if match:
        # match.group(1) will be the variable_name itself
        # match.group(3) will be the content inside the triple quotes
        content = match.group(3).strip()
        return content
    return None
|
Find the given custom args variable in the file content and return its content.
Args:
file_content: The string content of the Python file.
custom_args_var_name: The name of the custom args variable.
|
find_custom_args_with_details
|
python
|
huggingface/transformers
|
utils/check_docstrings.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_docstrings.py
|
Apache-2.0
|
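For illustration, here is a minimal sketch of the pattern built above applied to a made-up source string; the variable name `CUSTOM_ARGS` and its content are invented for this example only.

```python
import re

# Illustrative only: reproduces the regex constructed by find_custom_args_with_details.
source = 'CUSTOM_ARGS = r"""\n    extra_arg (`int`): example-only argument.\n"""\n'
pattern = rf"^\s*({re.escape('CUSTOM_ARGS')})\s*=\s*(r?\"\"\")(.*?)(\"\"\")"
match = re.search(pattern, source, re.MULTILINE | re.DOTALL)
print(match.group(3).strip())  # -> extra_arg (`int`): example-only argument.
```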
def update_file_with_new_docstrings(
    candidate_file, lines, line_starts_candidates, line_ends_candidates, overwrite=False
):
    """
    For a given file, update the docstrings for all @auto_docstring candidates and write the new content.
    """
    content_base_file_new_lines = lines[: line_ends_candidates[0]]
    current_line_start = line_starts_candidates[0]
    current_line_end = line_ends_candidates[0]
    index = 1
    missing_docstring_args_warnings = []
    fill_docstring_args_warnings = []
    while index <= len(line_starts_candidates):
        custom_args_dict = {}
        auto_docstring_signature_content = "".join(lines[current_line_start:current_line_end])
        match = re.findall(r"custom_args=(\w+)", auto_docstring_signature_content)
        if match:
            custom_args_var_name = match[0]
            custom_args_var_content = find_custom_args_with_details("\n".join(lines), custom_args_var_name)
            if custom_args_var_content:
                custom_args_dict, _ = parse_docstring(custom_args_var_content)
        new_docstring = ""
        found_init_method = False
        # Function
        if " def" in lines[current_line_end]:
            new_docstring, sig_line_end, docstring_end, missing_docstring_args, fill_docstring_args = (
                generate_new_docstring_for_function(lines, current_line_end, custom_args_dict)
            )
        # Class
        elif "class " in lines[current_line_end]:
            (
                new_docstring,
                init_method_line,
                init_method_sig_line_end,
                init_method_docstring_end,
                missing_docstring_args,
                fill_docstring_args,
            ) = generate_new_docstring_for_class(lines, current_line_end, custom_args_dict)
            found_init_method = init_method_line is not None
        # Add warnings if needed
        if missing_docstring_args:
            for arg in missing_docstring_args:
                missing_docstring_args_warnings.append(f" - {arg} line {current_line_end}")
        if fill_docstring_args:
            for arg in fill_docstring_args:
                fill_docstring_args_warnings.append(f" - {arg} line {current_line_end}")
        # Write new lines
        if index >= len(line_ends_candidates) or line_ends_candidates[index] > current_line_end:
            if " def" in lines[current_line_end]:
                content_base_file_new_lines += lines[current_line_end:sig_line_end]
                if new_docstring != "":
                    content_base_file_new_lines += new_docstring.split("\n")
                if index < len(line_ends_candidates):
                    content_base_file_new_lines += lines[docstring_end + 1 : line_ends_candidates[index]]
                else:
                    content_base_file_new_lines += lines[docstring_end + 1 :]
            elif found_init_method:
                content_base_file_new_lines += lines[current_line_end:init_method_sig_line_end]
                if new_docstring != "":
                    content_base_file_new_lines += new_docstring.split("\n")
                if index < len(line_ends_candidates):
                    content_base_file_new_lines += lines[init_method_docstring_end + 1 : line_ends_candidates[index]]
                else:
                    content_base_file_new_lines += lines[init_method_docstring_end + 1 :]
            elif index < len(line_ends_candidates):
                content_base_file_new_lines += lines[current_line_end : line_ends_candidates[index]]
            else:
                content_base_file_new_lines += lines[current_line_end:]
        if index < len(line_ends_candidates):
            current_line_end = line_ends_candidates[index]
            current_line_start = line_starts_candidates[index]
        index += 1
    content_base_file_new = "\n".join(content_base_file_new_lines)
    if overwrite:
        with open(candidate_file, "w", encoding="utf-8") as f:
            f.write(content_base_file_new)
    return missing_docstring_args_warnings, fill_docstring_args_warnings
|
For a given file, update the docstrings for all @auto_docstring candidates and write the new content.
|
update_file_with_new_docstrings
|
python
|
huggingface/transformers
|
utils/check_docstrings.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_docstrings.py
|
Apache-2.0
|
def check_docstrings(overwrite: bool = False, check_all: bool = False):
    """
    Check docstrings of all public objects that are callables and are documented. By default, only checks the diff.
    Args:
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether to fix inconsistencies or not.
        check_all (`bool`, *optional*, defaults to `False`):
            Whether to check all files.
    """
    module_diff_files = None
    if not check_all:
        module_diff_files = set()
        repo = Repo(PATH_TO_REPO)
        # Diff from index to unstaged files
        for modified_file_diff in repo.index.diff(None):
            if modified_file_diff.a_path.startswith("src/transformers"):
                module_diff_files.add(modified_file_diff.a_path)
        # Diff from index to `main`
        for modified_file_diff in repo.index.diff(repo.refs.main.commit):
            if modified_file_diff.a_path.startswith("src/transformers"):
                module_diff_files.add(modified_file_diff.a_path)
        # quick escape route: if there are no module files in the diff, skip this check
        if len(module_diff_files) == 0:
            return
        print(" Checking docstrings in the following files:" + "\n - " + "\n - ".join(module_diff_files))
    failures = []
    hard_failures = []
    to_clean = []
    for name in dir(transformers):
        # Skip objects that are private or not documented.
        if name.startswith("_") or ignore_undocumented(name) or name in OBJECTS_TO_IGNORE:
            continue
        obj = getattr(transformers, name)
        if not callable(obj) or not isinstance(obj, type) or getattr(obj, "__doc__", None) is None:
            continue
        # If we are checking against the diff, we skip objects that are not part of the diff.
        if module_diff_files is not None:
            object_file = find_source_file(getattr(transformers, name))
            object_file_relative_path = "src/" + str(object_file).split("/src/")[1]
            if object_file_relative_path not in module_diff_files:
                continue
        # Check docstring
        try:
            result = match_docstring_with_signature(obj)
            if result is not None:
                old_doc, new_doc = result
            else:
                old_doc, new_doc = None, None
        except Exception as e:
            print(e)
            hard_failures.append(name)
            continue
        if old_doc != new_doc:
            print("name", name)
            print("old_doc", old_doc)
            print("new_doc", new_doc)
            if overwrite:
                fix_docstring(obj, old_doc, new_doc)
            else:
                failures.append(name)
        elif not overwrite and new_doc is not None and ("<fill_type>" in new_doc or "<fill_docstring>" in new_doc):
            to_clean.append(name)
    # Deal with errors
    error_message = ""
    if len(hard_failures) > 0:
        error_message += (
            "The argument part of the docstrings of the following objects could not be processed, check they are "
            "properly formatted."
        )
        error_message += "\n" + "\n".join([f"- {name}" for name in hard_failures])
    if len(failures) > 0:
        error_message += (
            "The following objects docstrings do not match their signature. Run `make fix-copies` to fix this. "
            "In some cases, this error may be raised incorrectly by the docstring checker. If you think this is the "
            "case, you can manually check the docstrings and then add the object name to `OBJECTS_TO_IGNORE` in "
            "`utils/check_docstrings.py`."
        )
        error_message += "\n" + "\n".join([f"- {name}" for name in failures])
    if len(to_clean) > 0:
        error_message += (
            "The following objects docstrings contain templates you need to fix: search for `<fill_type>` or "
            "`<fill_docstring>`."
        )
        error_message += "\n" + "\n".join([f"- {name}" for name in to_clean])
    if len(error_message) > 0:
        error_message = "There was at least one problem when checking docstrings of public objects.\n" + error_message
        raise ValueError(error_message)
|
Check docstrings of all public objects that are callables and are documented. By default, only checks the diff.
Args:
overwrite (`bool`, *optional*, defaults to `False`):
Whether to fix inconsistencies or not.
check_all (`bool`, *optional*, defaults to `False`):
Whether to check all files.
|
check_docstrings
|
python
|
huggingface/transformers
|
utils/check_docstrings.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_docstrings.py
|
Apache-2.0
|
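A minimal sketch of the diff-scoping step above, using the same GitPython calls; it assumes it is run from the root of a transformers checkout (the role played by `PATH_TO_REPO` in the original script), and its output depends on your working tree.

```python
from git import Repo  # GitPython, the same dependency used by check_docstrings

repo = Repo(".")
# Unstaged changes under src/transformers, as in the first diff loop above.
modified = {d.a_path for d in repo.index.diff(None) if d.a_path.startswith("src/transformers")}
print(modified)
```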
def clean_doctest_list(doctest_file: str, overwrite: bool = False):
    """
    Cleans the doctest in a given file.
    Args:
        doctest_file (`str`):
            The path to the doctest file to check or clean.
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to fix problems. If `False`, will error when the file is not clean.
    """
    non_existent_paths = []
    all_paths = []
    with open(doctest_file, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip().split(" ")[0]
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(line)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join([f"- {f}" for f in non_existent_paths])
        raise ValueError(f"`{doctest_file}` contains non-existent paths:\n{non_existent_paths}")
    sorted_paths = sorted(all_paths)
    if all_paths != sorted_paths:
        if not overwrite:
            raise ValueError(
                f"Files in `{doctest_file}` are not in alphabetical order, run `make fix-copies` to fix "
                "this automatically."
            )
        with open(doctest_file, "w", encoding="utf-8") as f:
            f.write("\n".join(sorted_paths) + "\n")
|
Cleans the doctest in a given file.
Args:
doctest_file (`str`):
The path to the doctest file to check or clean.
overwrite (`bool`, *optional*, defaults to `False`):
Whether or not to fix problems. If `False`, will error when the file is not clean.
|
clean_doctest_list
|
python
|
huggingface/transformers
|
utils/check_doctest_list.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_doctest_list.py
|
Apache-2.0
|
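A tiny illustration of the ordering check above on a made-up list of paths; with `overwrite=True` the file would simply be rewritten in sorted order instead of raising.

```python
# Made-up doctest list entries, out of alphabetical order.
all_paths = ["docs/source/en/model_doc/bert.md", "docs/source/en/model_doc/albert.md"]
sorted_paths = sorted(all_paths)
print(all_paths != sorted_paths)  # -> True, so clean_doctest_list would raise without overwrite
```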
def clean_model_doc_toc(model_doc: List[dict]) -> List[dict]:
    """
    Cleans a section of the table of content of the model documentation (one specific modality) by removing duplicates
    and sorting models alphabetically.
    Args:
        model_doc (`List[dict]`):
            The list of dictionaries extracted from the `_toctree.yml` file for this specific modality.
    Returns:
        `List[dict]`: List of dictionaries like the input, but cleaned up and sorted.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})
    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])
    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
|
Cleans a section of the table of content of the model documentation (one specific modality) by removing duplicates
and sorting models alphabetically.
Args:
model_doc (`List[dict]`):
The list of dictionaries extracted from the `_toctree.yml` file for this specific modality.
Returns:
`List[dict]`: List of dictionaries like the input, but cleaned up and sorted.
|
clean_model_doc_toc
|
python
|
huggingface/transformers
|
utils/check_doc_toc.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_doc_toc.py
|
Apache-2.0
|
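A hypothetical TOC section (entries invented for this example) illustrating the deduplicate-then-sort behaviour implemented above; the inline version below mirrors the logic rather than importing the function.

```python
from collections import Counter

model_doc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},  # duplicate "local" with the same title
]
counts = Counter(doc["local"] for doc in model_doc)
deduped = [{"local": k, "title": next(d["title"] for d in model_doc if d["local"] == k)} for k in counts]
print(sorted(deduped, key=lambda d: d["title"].lower()))
# -> [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]
```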
def check_model_doc(overwrite: bool = False):
    """
    Check that the content of the table of content in `_toctree.yml` is clean (no duplicates and sorted for the model
    API doc) and potentially auto-cleans it.
    Args:
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether to just check if the TOC is clean or to auto-clean it (when `overwrite=True`).
    """
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]
    # Extract the modalities and clean them one by one.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
|
Check that the content of the table of content in `_toctree.yml` is clean (no duplicates and sorted for the model
API doc) and potentially auto-cleans it.
Args:
overwrite (`bool`, *optional*, defaults to `False`):
Whether to just check if the TOC is clean or to auto-clean it (when `overwrite=True`).
|
check_model_doc
|
python
|
huggingface/transformers
|
utils/check_doc_toc.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_doc_toc.py
|
Apache-2.0
|
def find_backend(line: str) -> Optional[str]:
    """
    Find one (or multiple) backend in a code line of the init.
    Args:
        line (`str`): A code line in an init file.
    Returns:
        Optional[`str`]: If one (or several) backend is found, returns it. In the case of multiple backends (the line
        contains `if is_xxx_available() and is_yyy_available()`) returns all backends joined on `_and_` (so
        `xxx_and_yyy` for instance).
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
|
Find one (or multiple) backend in a code line of the init.
Args:
line (`str`): A code line in an init file.
Returns:
Optional[`str`]: If one (or several) backend is found, returns it. In the case of multiple backends (the line
contains `if is_xxx_available() and is_yyy_available()`) returns all backends joined on `_and_` (so
`xxx_and_yyy` for instance).
|
find_backend
|
python
|
huggingface/transformers
|
utils/check_dummies.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_dummies.py
|
Apache-2.0
|
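The module-level regexes `_re_backend` and `_re_test_backend` are not shown in this excerpt; the two below are stand-ins (assumptions) used only so the joining behaviour of `find_backend` can be seen end to end.

```python
import re

_re_backend = re.compile(r"is_(\w+)_available(\(\))")        # stand-in, two groups like the original usage
_re_test_backend = re.compile(r"^\s*if\s+is_\w+_available\(\)")  # stand-in

line = "    if is_torch_available() and is_vision_available():"
if _re_test_backend.search(line) is not None:
    backends = sorted(b[0] for b in _re_backend.findall(line))
    print("_and_".join(backends))  # -> torch_and_vision
```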
def read_init() -> Dict[str, List[str]]:
    """
    Read the init and extract backend-specific objects.
    Returns:
        Dict[str, List[str]]: A dictionary mapping backend name to the list of object names requiring that backend.
    """
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        line_index += 1
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("    else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    # Single-line imports
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    # Multiple-line imports (with 3 indent level)
                    objects.append(line[12:-2])
                line_index += 1
            backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
|
Read the init and extract backend-specific objects.
Returns:
Dict[str, List[str]]: A dictionary mapping backend name to the list of object names requiring that backend.
|
read_init
|
python
|
huggingface/transformers
|
utils/check_dummies.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_dummies.py
|
Apache-2.0
|
def create_dummy_object(name: str, backend_name: str) -> str:
    """
    Create the code for a dummy object.
    Args:
        name (`str`): The name of the object.
        backend_name (`str`): The name of the backend required for that object.
    Returns:
        `str`: The code of the dummy object.
    """
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
|
Create the code for a dummy object.
Args:
name (`str`): The name of the object.
backend_name (`str`): The name of the backend required for that object.
Returns:
`str`: The code of the dummy object.
|
create_dummy_object
|
python
|
huggingface/transformers
|
utils/check_dummies.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_dummies.py
|
Apache-2.0
|
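The `DUMMY_CONSTANT`, `DUMMY_FUNCTION` and `DUMMY_CLASS` templates live at module level in `check_dummies.py` and are not part of this excerpt; the versions below are illustrative approximations, shown only to make the dispatch above concrete.

```python
# Approximate templates (not verbatim copies of the originals).
DUMMY_CONSTANT = """
{0} = None
"""
DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

# Mirroring the dispatch: upper-case -> constant, lower-case -> function, otherwise class.
print(DUMMY_CLASS.format("BertModel", '["torch"]'))
```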
def create_dummy_files(backend_specific_objects: Optional[Dict[str, List[str]]] = None) -> Dict[str, str]:
    """
    Create the content of the dummy files.
    Args:
        backend_specific_objects (`Dict[str, List[str]]`, *optional*):
            The mapping backend name to list of backend-specific objects. If not passed, will be obtained by calling
            `read_init()`.
    Returns:
        `Dict[str, str]`: A dictionary mapping backend name to code of the corresponding backend file.
    """
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
    return dummy_files
|
Create the content of the dummy files.
Args:
backend_specific_objects (`Dict[str, List[str]]`, *optional*):
The mapping backend name to list of backend-specific objects. If not passed, will be obtained by calling
`read_init()`.
Returns:
`Dict[str, str]`: A dictionary mapping backend name to code of the corresponding backend file.
|
create_dummy_files
|
python
|
huggingface/transformers
|
utils/check_dummies.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_dummies.py
|
Apache-2.0
|
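A tiny illustration of the backend-name list literal built above; the backend key is invented for this example.

```python
backend = "torch_and_vision"
backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
print(backend_name)  # -> ["torch", "vision"]
```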
def check_dummies(overwrite: bool = False):
    """
    Check if the dummy files are up to date and maybe `overwrite` with the right content.
    Args:
        overwrite (`bool`, *optional*, default to `False`):
            Whether or not to overwrite the content of the dummy files. Will raise an error if they are not up to date
            when `overwrite=False`.
    """
    dummy_files = create_dummy_files()
    # For special correspondence backend name to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_TRANSFORMERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
    # Compare actual with what they should be.
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                # Temporary fix to help people identify which objects introduced are not correctly protected.
                found = False
                for _actual, _dummy in zip(
                    actual_dummies["torch"].split("class"), dummy_files["torch"].split("class")
                ):
                    if _actual != _dummy:
                        actual_broken = _actual
                        dummy_broken = _dummy
                        found = True
                        break
                if not found:
                    print("A transient error was found with the dummies, please investigate.")
                    continue
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py.\n"
                    f" It is likely the following objects are responsible, see these excerpts: \n"
                    f"---------------------------------- Actual -------------------------------------\n"
                    f" \n {actual_broken} \n"
                    f"---------------------------------- Dummy -------------------------------------\n"
                    f" \n {dummy_broken} \n"
                    "Run `make fix-copies` to fix this."
                )
|
Check if the dummy files are up to date and maybe `overwrite` with the right content.
Args:
overwrite (`bool`, *optional*, default to `False`):
Whether or not to overwrite the content of the dummy files. Will raise an error if they are not up to date
when `overwrite=False`.
|
check_dummies
|
python
|
huggingface/transformers
|
utils/check_dummies.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_dummies.py
|
Apache-2.0
|
def find_backend(line: str) -> Optional[str]:
    """
    Find one (or multiple) backend in a code line of the init.
    Args:
        line (`str`): A code line of the main init.
    Returns:
        Optional[`str`]: If one (or several) backend is found, returns it. In the case of multiple backends (the line
        contains `if is_xxx_available() and is_yyy_available()`) returns all backends joined on `_and_` (so
        `xxx_and_yyy` for instance).
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
|
Find one (or multiple) backend in a code line of the init.
Args:
line (`str`): A code line of the main init.
Returns:
Optional[`str`]: If one (or several) backend is found, returns it. In the case of multiple backends (the line
contains `if is_xxx_available() and is_yyy_available()`) returns all backends joined on `_and_` (so
`xxx_and_yyy` for instance).
|
find_backend
|
python
|
huggingface/transformers
|
utils/check_inits.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_inits.py
|
Apache-2.0
|
def parse_init(init_file) -> Optional[Tuple[Dict[str, List[str]], Dict[str, List[str]]]]:
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    Args:
        init_file (`str`): Path to the init file to inspect.
    Returns:
        `Optional[Tuple[Dict[str, List[str]], Dict[str, List[str]]]]`: A tuple of two dictionaries mapping backends to list of
        imported objects, one for the `_import_structure` part of the init and one for the `TYPE_CHECKING` part of the
        init. Returns `None` if the init is not a custom init.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Get to the `_import_structure` definition.
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    # Those are stored with the key "none".
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
|
Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
defined.
Args:
init_file (`str`): Path to the init file to inspect.
Returns:
`Optional[Tuple[Dict[str, List[str]], Dict[str, List[str]]]]`: A tuple of two dictionaries mapping backends to list of
imported objects, one for the `_import_structure` part of the init and one for the `TYPE_CHECKING` part of the
init. Returns `None` if the init is not a custom init.
|
parse_init
|
python
|
huggingface/transformers
|
utils/check_inits.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_inits.py
|
Apache-2.0
|
def analyze_results(import_dict_objects: Dict[str, List[str]], type_hint_objects: Dict[str, List[str]]) -> List[str]:
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    Args:
        import_dict_objects (`Dict[str, List[str]]`):
            A dictionary mapping backend names (`"none"` for the objects independent of any specific backend) to
            list of imported objects.
        type_hint_objects (`Dict[str, List[str]]`):
            A dictionary mapping backend names (`"none"` for the objects independent of any specific backend) to
            list of imported objects.
    Returns:
        `List[str]`: The list of errors corresponding to mismatches.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    # If one backend is missing from the other part of the init, error early.
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    # Find all errors.
    for key in import_dict_objects.keys():
        # Duplicate imports in any half.
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        # Missing imports in either part of the init.
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f" {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f" {a} in _import_structure but not in TYPE_HINT.")
    return errors
|
Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
Args:
import_dict_objects (`Dict[str, List[str]]`):
A dictionary mapping backend names (`"none"` for the objects independent of any specific backend) to
list of imported objects.
type_hint_objects (`Dict[str, List[str]]`):
A dictionary mapping backend names (`"none"` for the objects independent of any specific backend) to
list of imported objects.
Returns:
`List[str]`: The list of errors corresponding to mismatches.
|
analyze_results
|
python
|
huggingface/transformers
|
utils/check_inits.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_inits.py
|
Apache-2.0
|
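Invented inputs illustrating the kinds of errors reported; this assumes `analyze_results` from the row above is defined in the same session, and the class names are examples only.

```python
import_dict_objects = {"none": ["PretrainedConfig"], "torch": ["BertModel", "BertModel"]}
type_hint_objects = {"none": ["PretrainedConfig"], "torch": ["BertModel", "BertForMaskedLM"]}
print(analyze_results(import_dict_objects, type_hint_objects))
# Expected (roughly):
#   "Duplicate _import_structure definitions for: ['BertModel']"
#   "Differences for torch backend:"
#   " BertForMaskedLM in TYPE_HINT but not in _import_structure."
```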
def get_transformers_submodules() -> List[str]:
    """
    Returns the list of Transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
|
Returns the list of Transformers submodules.
|
get_transformers_submodules
|
python
|
huggingface/transformers
|
utils/check_inits.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_inits.py
|
Apache-2.0
|
def check_submodules():
    """
    Check all submodules of Transformers are properly registered in the main init. Error otherwise.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
|
Check all submodules of Transformers are properly registered in the main init. Error otherwise.
|
check_submodules
|
python
|
huggingface/transformers
|
utils/check_inits.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_inits.py
|
Apache-2.0
|
def get_models_in_diff():
    """
    Finds all models that have been modified in the diff.
    Returns:
        A set containing the names of the models that have been modified (e.g. {'llama', 'whisper'}).
    """
    fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
    modified_files = (
        subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split())
        .decode("utf-8")
        .split()
    )
    # Matches both modelling files and tests
    relevant_modified_files = [x for x in modified_files if "/models/" in x and x.endswith(".py")]
    model_names = set()
    for file_path in relevant_modified_files:
        model_name = file_path.split("/")[-2]
        model_names.add(model_name)
    return model_names
|
Finds all models that have been modified in the diff.
Returns:
A set containing the names of the models that have been modified (e.g. {'llama', 'whisper'}).
|
get_models_in_diff
|
python
|
huggingface/transformers
|
utils/check_modular_conversion.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_modular_conversion.py
|
Apache-2.0
|
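Invented paths showing how the model name is derived from a modified file above: index `-2` of the split path is the model folder for both modeling files and their tests.

```python
for file_path in [
    "src/transformers/models/llama/modeling_llama.py",
    "tests/models/whisper/test_modeling_whisper.py",
]:
    print(file_path.split("/")[-2])  # -> llama, then whisper
```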
def guaranteed_no_diff(modular_file_path, dependencies, models_in_diff):
    """
    Returns whether it is guaranteed to have no differences between the modular file and the modeling file.
    Model is in the diff -> not guaranteed to have no differences
    Dependency is in the diff -> not guaranteed to have no differences
    Otherwise -> guaranteed to have no differences
    Args:
        modular_file_path: The path to the modular file.
        dependencies: A dictionary containing the dependencies of each modular file.
        models_in_diff: A set containing the names of the models that have been modified.
    Returns:
        A boolean indicating whether the model (code and tests) is guaranteed to have no differences.
    """
    model_name = modular_file_path.rsplit("modular_", 1)[1].replace(".py", "")
    if model_name in models_in_diff:
        return False
    for dep in dependencies[modular_file_path]:
        # two possible patterns: `transformers.models.model_name.(...)` or `model_name.(...)`
        dependency_model_name = dep.split(".")[-2]
        if dependency_model_name in models_in_diff:
            return False
    return True
|
Returns whether it is guaranteed to have no differences between the modular file and the modeling file.
Model is in the diff -> not guaranteed to have no differences
Dependency is in the diff -> not guaranteed to have no differences
Otherwise -> guaranteed to have no differences
Args:
modular_file_path: The path to the modular file.
dependencies: A dictionary containing the dependencies of each modular file.
models_in_diff: A set containing the names of the models that have been modified.
Returns:
A boolean indicating whether the model (code and tests) is guaranteed to have no differences.
|
guaranteed_no_diff
|
python
|
huggingface/transformers
|
utils/check_modular_conversion.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_modular_conversion.py
|
Apache-2.0
|
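An invented dependency example showing how the model name of a dependency is extracted above: both patterns mentioned in the inline comment yield the same second-to-last component.

```python
for dep in ["transformers.models.llama.modeling_llama", "llama.modeling_llama"]:
    print(dep.split(".")[-2])  # -> llama in both cases, compared against models_in_diff
```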
def check_missing_backends():
    """
    Checks if all backends are installed (otherwise the check of this script is incomplete). Will error in the CI if
    that's not the case but only throw a warning for users running this.
    """
    missing_backends = []
    if not is_torch_available():
        missing_backends.append("PyTorch")
    if not is_tf_available():
        missing_backends.append("TensorFlow")
    if not is_flax_available():
        missing_backends.append("Flax")
    if len(missing_backends) > 0:
        missing = ", ".join(missing_backends)
        if os.getenv("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
            raise Exception(
                "Full repo consistency checks require all backends to be installed (with `pip install -e '.[dev]'` in the "
                f"Transformers repo, the following are missing: {missing}."
            )
        else:
            warnings.warn(
                "Full repo consistency checks require all backends to be installed (with `pip install -e '.[dev]'` in the "
                f"Transformers repo, the following are missing: {missing}. While it's probably fine as long as you "
                "didn't make any change in one of those backends modeling files, you should probably execute the "
                "command above to be on the safe side."
            )
|
Checks if all backends are installed (otherwise the check of this script is incomplete). Will error in the CI if
that's not the case but only throw a warning for users running this.
|
check_missing_backends
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def check_model_list():
    """
    Checks the model listed as subfolders of `models` match the models available in `transformers.models`.
    """
    # Get the models from the directory structure of `src/transformers/models/`
    import transformers as tfrs

    models_dir = os.path.join(PATH_TO_TRANSFORMERS, "models")
    _models = []
    for model in os.listdir(models_dir):
        if model == "deprecated":
            continue
        model_dir = os.path.join(models_dir, model)
        if os.path.isdir(model_dir) and "__init__.py" in os.listdir(model_dir):
            # If the init is empty, and there are only two files, it's likely that there's just a conversion
            # script. Those should not be in the init.
            if (Path(model_dir) / "__init__.py").read_text().strip() == "":
                continue
            _models.append(model)
    # Get the models in the submodule `transformers.models`
    models = [model for model in dir(tfrs.models) if not model.startswith("__")]
    missing_models = sorted(set(_models).difference(models))
    if missing_models:
        raise Exception(
            f"The following models should be included in {models_dir}/__init__.py: {','.join(missing_models)}."
        )
|
Checks the model listed as subfolders of `models` match the models available in `transformers.models`.
|
check_model_list
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def get_model_modules() -> List[str]:
    """Get all the model modules inside the transformers library (except deprecated models)."""
    _ignore_modules = [
        "modeling_auto",
        "modeling_encoder_decoder",
        "modeling_marian",
        "modeling_retribert",
        "modeling_flax_auto",
        "modeling_flax_encoder_decoder",
        "modeling_speech_encoder_decoder",
        "modeling_flax_speech_encoder_decoder",
        "modeling_flax_vision_encoder_decoder",
        "modeling_timm_backbone",
        "modeling_tf_auto",
        "modeling_tf_encoder_decoder",
        "modeling_tf_vision_encoder_decoder",
        "modeling_vision_encoder_decoder",
    ]
    modules = []
    for model in dir(transformers.models):
        # There are some magic dunder attributes in the dir, we ignore them
        if "deprecated" in model or model.startswith("__"):
            continue
        model_module = getattr(transformers.models, model)
        for submodule in dir(model_module):
            if submodule.startswith("modeling") and submodule not in _ignore_modules:
                modeling_module = getattr(model_module, submodule)
                modules.append(modeling_module)
    return modules
|
Get all the model modules inside the transformers library (except deprecated models).
|
get_model_modules
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def get_models(module: types.ModuleType, include_pretrained: bool = False) -> List[Tuple[str, type]]:
    """
    Get the objects in a module that are models.
    Args:
        module (`types.ModuleType`):
            The module from which we are extracting models.
        include_pretrained (`bool`, *optional*, defaults to `False`):
            Whether or not to include the `PreTrainedModel` subclass (like `BertPreTrainedModel`) or not.
    Returns:
        List[Tuple[str, type]]: List of models as tuples (class name, actual class).
    """
    models = []
    model_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel, transformers.FlaxPreTrainedModel)
    for attr_name in dir(module):
        if not include_pretrained and ("Pretrained" in attr_name or "PreTrained" in attr_name):
            continue
        attr = getattr(module, attr_name)
        if isinstance(attr, type) and issubclass(attr, model_classes) and attr.__module__ == module.__name__:
            models.append((attr_name, attr))
    return models
|
Get the objects in a module that are models.
Args:
module (`types.ModuleType`):
The module from which we are extracting models.
include_pretrained (`bool`, *optional*, defaults to `False`):
Whether or not to include the `PreTrainedModel` subclass (like `BertPreTrainedModel`) or not.
Returns:
List[Tuple[str, type]]: List of models as tuples (class name, actual class).
|
get_models
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
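A small illustration of the default filter above: anything whose name contains "Pretrained" or "PreTrained" is skipped unless `include_pretrained=True` (the class names here are examples only).

```python
for attr_name in ["BertModel", "BertPreTrainedModel"]:
    skipped = "Pretrained" in attr_name or "PreTrained" in attr_name
    print(attr_name, "skipped" if skipped else "kept")
```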
def is_building_block(model: str) -> bool:
    """
    Returns `True` if a model is a building block part of a bigger model.
    """
    if model.endswith("Wrapper"):
        return True
    if model.endswith("Encoder"):
        return True
    if model.endswith("Decoder"):
        return True
    if model.endswith("Prenet"):
        return True
|
Returns `True` if a model is a building block part of a bigger model.
|
is_building_block
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def is_a_private_model(model: str) -> bool:
    """Returns `True` if the model should not be in the main init."""
    if model in PRIVATE_MODELS:
        return True
    return is_building_block(model)
|
Returns `True` if the model should not be in the main init.
|
is_a_private_model
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def check_models_are_in_init():
    """Checks all models defined in the library are in the main init."""
    models_not_in_init = []
    dir_transformers = dir(transformers)
    for module in get_model_modules():
        models_not_in_init += [
            model[0] for model in get_models(module, include_pretrained=True) if model[0] not in dir_transformers
        ]
    # Remove private models
    models_not_in_init = [model for model in models_not_in_init if not is_a_private_model(model)]
    if len(models_not_in_init) > 0:
        raise Exception(f"The following models should be in the main init: {','.join(models_not_in_init)}.")
|
Checks all models defined in the library are in the main init.
|
check_models_are_in_init
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def get_model_test_files() -> List[str]:
    """
    Get the model test files.
    Returns:
        `List[str]`: The list of test files. The returned files will NOT contain the `tests` (i.e. `PATH_TO_TESTS`
        defined in this script). They will be considered as paths relative to `tests`. A caller has to use
        `os.path.join(PATH_TO_TESTS, ...)` to access the files.
    """
    _ignore_files = [
        "test_modeling_common",
        "test_modeling_encoder_decoder",
        "test_modeling_flax_encoder_decoder",
        "test_modeling_flax_speech_encoder_decoder",
        "test_modeling_marian",
        "test_modeling_tf_common",
        "test_modeling_tf_encoder_decoder",
    ]
    test_files = []
    model_test_root = os.path.join(PATH_TO_TESTS, "models")
    model_test_dirs = []
    for x in os.listdir(model_test_root):
        x = os.path.join(model_test_root, x)
        if os.path.isdir(x):
            model_test_dirs.append(x)
    for target_dir in [PATH_TO_TESTS] + model_test_dirs:
        for file_or_dir in os.listdir(target_dir):
            path = os.path.join(target_dir, file_or_dir)
            if os.path.isfile(path):
                filename = os.path.split(path)[-1]
                if "test_modeling" in filename and os.path.splitext(filename)[0] not in _ignore_files:
                    file = os.path.join(*path.split(os.sep)[1:])
                    test_files.append(file)
    return test_files
|
Get the model test files.
Returns:
`List[str]`: The list of test files. The returned files will NOT contain the `tests` (i.e. `PATH_TO_TESTS`
defined in this script). They will be considered as paths relative to `tests`. A caller has to use
`os.path.join(PATH_TO_TESTS, ...)` to access the files.
|
get_model_test_files
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def find_tested_models(test_file: str) -> List[str]:
    """
    Parse the content of test_file to detect what's in `all_model_classes`. This detects the models that inherit from
    the common test class.
    Args:
        test_file (`str`): The path to the test file to check
    Returns:
        `List[str]`: The list of models tested in that file.
    """
    with open(os.path.join(PATH_TO_TESTS, test_file), "r", encoding="utf-8", newline="\n") as f:
        content = f.read()
    all_models = re.findall(r"all_model_classes\s+=\s+\(\s*\(([^\)]*)\)", content)
    # Check with one less parenthesis as well
    all_models += re.findall(r"all_model_classes\s+=\s+\(([^\)]*)\)", content)
    if len(all_models) > 0:
        model_tested = []
        for entry in all_models:
            for line in entry.split(","):
                name = line.strip()
                if len(name) > 0:
                    model_tested.append(name)
        return model_tested
|
Parse the content of test_file to detect what's in `all_model_classes`. This detects the models that inherit from
the common test class.
Args:
test_file (`str`): The path to the test file to check
Returns:
`List[str]`: The list of models tested in that file.
|
find_tested_models
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
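Made-up test-file content showing what the two regexes above capture; only the single-parenthesis form matches here, so the second pattern does the work.

```python
import re

content = "all_model_classes = (BertModel, BertForMaskedLM) if is_torch_available() else ()"
all_models = re.findall(r"all_model_classes\s+=\s+\(\s*\(([^\)]*)\)", content)
all_models += re.findall(r"all_model_classes\s+=\s+\(([^\)]*)\)", content)
print(all_models)  # -> ['BertModel, BertForMaskedLM']
```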
def should_be_tested(model_name: str) -> bool:
    """
    Whether or not a model should be tested.
    """
    if model_name in IGNORE_NON_TESTED:
        return False
    return not is_building_block(model_name)
|
Whether or not a model should be tested.
|
should_be_tested
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def check_models_are_tested(module: types.ModuleType, test_file: str) -> List[str]:
    """Check models defined in a module are all tested in a given file.
    Args:
        module (`types.ModuleType`): The module in which we get the models.
        test_file (`str`): The path to the file where the module is tested.
    Returns:
        `List[str]`: The list of error messages corresponding to models not tested.
    """
    # XxxPreTrainedModel are not tested
    defined_models = get_models(module)
    tested_models = find_tested_models(test_file)
    if tested_models is None:
        if test_file.replace(os.path.sep, "/") in TEST_FILES_WITH_NO_COMMON_TESTS:
            return
        return [
            f"{test_file} should define `all_model_classes` to apply common tests to the models it tests. "
            + "If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file "
            + "`utils/check_repo.py`."
        ]
    failures = []
    for model_name, _ in defined_models:
        if model_name not in tested_models and should_be_tested(model_name):
            failures.append(
                f"{model_name} is defined in {module.__name__} but is not tested in "
                + f"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file."
                + "If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`"
                + "in the file `utils/check_repo.py`."
            )
    return failures
|
Check models defined in a module are all tested in a given file.
Args:
module (`types.ModuleType`): The module in which we get the models.
test_file (`str`): The path to the file where the module is tested.
Returns:
`List[str]`: The list of error messages corresponding to models not tested.
|
check_models_are_tested
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def check_all_models_are_tested():
    """Check all models are properly tested."""
    modules = get_model_modules()
    test_files = get_model_test_files()
    failures = []
    for module in modules:
        # Matches a module to its test file.
        test_file = [file for file in test_files if f"test_{module.__name__.split('.')[-1]}.py" in file]
        if len(test_file) == 0:
            failures.append(f"{module.__name__} does not have its corresponding test file {test_file}.")
        elif len(test_file) > 1:
            failures.append(f"{module.__name__} has several test files: {test_file}.")
        else:
            test_file = test_file[0]
            new_failures = check_models_are_tested(module, test_file)
            if new_failures is not None:
                failures += new_failures
    if len(failures) > 0:
        raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
|
Check all models are properly tested.
|
check_all_models_are_tested
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def get_all_auto_configured_models() -> List[str]:
    """Return the list of all models in at least one auto class."""
    result = set()  # To avoid duplicates we concatenate all model classes in a set.
    if is_torch_available():
        for attr_name in dir(transformers.models.auto.modeling_auto):
            if attr_name.startswith("MODEL_") and attr_name.endswith("MAPPING_NAMES"):
                result = result | set(get_values(getattr(transformers.models.auto.modeling_auto, attr_name)))
    if is_tf_available():
        for attr_name in dir(transformers.models.auto.modeling_tf_auto):
            if attr_name.startswith("TF_MODEL_") and attr_name.endswith("MAPPING_NAMES"):
                result = result | set(get_values(getattr(transformers.models.auto.modeling_tf_auto, attr_name)))
    if is_flax_available():
        for attr_name in dir(transformers.models.auto.modeling_flax_auto):
            if attr_name.startswith("FLAX_MODEL_") and attr_name.endswith("MAPPING_NAMES"):
                result = result | set(get_values(getattr(transformers.models.auto.modeling_flax_auto, attr_name)))
    return list(result)
|
Return the list of all models in at least one auto class.
|
get_all_auto_configured_models
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def ignore_unautoclassed(model_name: str) -> bool:
    """Rules to determine if a model should be in an auto class."""
    # Special white list
    if model_name in IGNORE_NON_AUTO_CONFIGURED:
        return True
    # Encoder and Decoder should be ignored
    if "Encoder" in model_name or "Decoder" in model_name:
        return True
    return False
|
Rules to determine if a model should be in an auto class.
|
ignore_unautoclassed
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def check_models_are_auto_configured(module: types.ModuleType, all_auto_models: List[str]) -> List[str]:
    """
    Check models defined in module are each in an auto class.
    Args:
        module (`types.ModuleType`):
            The module in which we get the models.
        all_auto_models (`List[str]`):
            The list of all models in an auto class (as obtained with `get_all_auto_configured_models()`).
    Returns:
        `List[str]`: The list of error messages corresponding to models not tested.
    """
    defined_models = get_models(module)
    failures = []
    for model_name, _ in defined_models:
        if model_name not in all_auto_models and not ignore_unautoclassed(model_name):
            failures.append(
                f"{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. "
                "If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file "
                "`utils/check_repo.py`."
            )
    return failures
|
Check models defined in module are each in an auto class.
Args:
module (`types.ModuleType`):
The module in which we get the models.
all_auto_models (`List[str]`):
The list of all models in an auto class (as obtained with `get_all_auto_configured_models()`).
Returns:
`List[str]`: The list of error messages corresponding to models not tested.
|
check_models_are_auto_configured
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def check_all_models_are_auto_configured():
    """Check all models are each in an auto class."""
    # This is where we need to check we have all backends or the check is incomplete.
    check_missing_backends()
    modules = get_model_modules()
    all_auto_models = get_all_auto_configured_models()
    failures = []
    for module in modules:
        new_failures = check_models_are_auto_configured(module, all_auto_models)
        if new_failures is not None:
            failures += new_failures
    if len(failures) > 0:
        raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
|
Check all models are each in an auto class.
|
check_all_models_are_auto_configured
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def check_all_auto_object_names_being_defined():
    """Check all names defined in auto (name) mappings exist in the library."""
    # This is where we need to check we have all backends or the check is incomplete.
    check_missing_backends()
    failures = []
    mappings_to_check = {
        "TOKENIZER_MAPPING_NAMES": TOKENIZER_MAPPING_NAMES,
        "IMAGE_PROCESSOR_MAPPING_NAMES": IMAGE_PROCESSOR_MAPPING_NAMES,
        "FEATURE_EXTRACTOR_MAPPING_NAMES": FEATURE_EXTRACTOR_MAPPING_NAMES,
        "PROCESSOR_MAPPING_NAMES": PROCESSOR_MAPPING_NAMES,
    }
    # Each auto modeling files contains multiple mappings. Let's get them in a dynamic way.
    for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]:
        module = getattr(transformers.models.auto, module_name, None)
        if module is None:
            continue
        # all mappings in a single auto modeling file
        mapping_names = [x for x in dir(module) if x.endswith("_MAPPING_NAMES")]
        mappings_to_check.update({name: getattr(module, name) for name in mapping_names})
    for name, mapping in mappings_to_check.items():
        for _, class_names in mapping.items():
            if not isinstance(class_names, tuple):
                class_names = (class_names,)
            for class_name in class_names:
                if class_name is None:
                    continue
                # dummy object is accepted
                if not hasattr(transformers, class_name):
                    # If the class name is in a model name mapping, let's not check if there is a definition in any modeling
                    # module, if it's a private model defined in this file.
                    if name.endswith("MODEL_MAPPING_NAMES") and is_a_private_model(class_name):
                        continue
                    if name.endswith("MODEL_FOR_IMAGE_MAPPING_NAMES") and is_a_private_model(class_name):
                        continue
                    failures.append(
                        f"`{class_name}` appears in the mapping `{name}` but it is not defined in the library."
                    )
    if len(failures) > 0:
        raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
|
Check all names defined in auto (name) mappings exist in the library.
|
check_all_auto_object_names_being_defined
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def check_all_auto_mapping_names_in_config_mapping_names():
"""Check all keys defined in auto mappings (mappings of names) appear in `CONFIG_MAPPING_NAMES`."""
# This is where we need to check we have all backends or the check is incomplete.
check_missing_backends()
failures = []
    # `TOKENIZER_PROCESSOR_MAPPING_NAMES` and `AutoTokenizer` are special and don't need to follow the rule.
mappings_to_check = {
"IMAGE_PROCESSOR_MAPPING_NAMES": IMAGE_PROCESSOR_MAPPING_NAMES,
"FEATURE_EXTRACTOR_MAPPING_NAMES": FEATURE_EXTRACTOR_MAPPING_NAMES,
"PROCESSOR_MAPPING_NAMES": PROCESSOR_MAPPING_NAMES,
}
    # Each auto modeling file contains multiple mappings. Let's get them in a dynamic way.
for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]:
module = getattr(transformers.models.auto, module_name, None)
if module is None:
continue
# all mappings in a single auto modeling file
mapping_names = [x for x in dir(module) if x.endswith("_MAPPING_NAMES")]
mappings_to_check.update({name: getattr(module, name) for name in mapping_names})
for name, mapping in mappings_to_check.items():
for model_type in mapping:
if model_type not in CONFIG_MAPPING_NAMES:
failures.append(
f"`{model_type}` appears in the mapping `{name}` but it is not defined in the keys of "
"`CONFIG_MAPPING_NAMES`."
)
if len(failures) > 0:
raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
|
Check all keys defined in auto mappings (mappings of names) appear in `CONFIG_MAPPING_NAMES`.
|
check_all_auto_mapping_names_in_config_mapping_names
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def check_all_auto_mappings_importable():
"""Check all auto mappings can be imported."""
# This is where we need to check we have all backends or the check is incomplete.
check_missing_backends()
failures = []
mappings_to_check = {}
    # Each auto modeling file contains multiple mappings. Let's get them in a dynamic way.
for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]:
module = getattr(transformers.models.auto, module_name, None)
if module is None:
continue
# all mappings in a single auto modeling file
mapping_names = [x for x in dir(module) if x.endswith("_MAPPING_NAMES")]
mappings_to_check.update({name: getattr(module, name) for name in mapping_names})
for name in mappings_to_check:
name = name.replace("_MAPPING_NAMES", "_MAPPING")
if not hasattr(transformers, name):
failures.append(f"`{name}`")
if len(failures) > 0:
raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
|
Check all auto mappings can be imported.
|
check_all_auto_mappings_importable
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def check_objects_being_equally_in_main_init():
"""
    Check that a (TensorFlow or Flax) object is in the main __init__ if and only if its counterpart in PyTorch is.
"""
attrs = dir(transformers)
failures = []
for attr in attrs:
obj = getattr(transformers, attr)
if hasattr(obj, "__module__") and isinstance(obj.__module__, ModuleSpec):
continue
if not hasattr(obj, "__module__") or "models.deprecated" in obj.__module__:
continue
module_path = obj.__module__
module_name = module_path.split(".")[-1]
module_dir = ".".join(module_path.split(".")[:-1])
if (
module_name.startswith("modeling_")
and not module_name.startswith("modeling_tf_")
and not module_name.startswith("modeling_flax_")
):
parent_module = sys.modules[module_dir]
frameworks = []
if is_tf_available():
frameworks.append("TF")
if is_flax_available():
frameworks.append("Flax")
for framework in frameworks:
other_module_path = module_path.replace("modeling_", f"modeling_{framework.lower()}_")
if os.path.isfile("src/" + other_module_path.replace(".", "/") + ".py"):
other_module_name = module_name.replace("modeling_", f"modeling_{framework.lower()}_")
other_module = getattr(parent_module, other_module_name)
if hasattr(other_module, f"{framework}{attr}"):
if not hasattr(transformers, f"{framework}{attr}"):
if f"{framework}{attr}" not in OBJECT_TO_SKIP_IN_MAIN_INIT_CHECK:
failures.append(f"{framework}{attr}")
if hasattr(other_module, f"{framework}_{attr}"):
if not hasattr(transformers, f"{framework}_{attr}"):
if f"{framework}_{attr}" not in OBJECT_TO_SKIP_IN_MAIN_INIT_CHECK:
failures.append(f"{framework}_{attr}")
if len(failures) > 0:
raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
|
Check that a (TensorFlow or Flax) object is in the main __init__ if and only if its counterpart in PyTorch is.
|
check_objects_being_equally_in_main_init
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def check_decorator_order(filename: str) -> List[int]:
"""
Check that in a given test file, the slow decorator is always last.
Args:
filename (`str`): The path to a test file to check.
Returns:
`List[int]`: The list of failures as a list of indices where there are problems.
"""
with open(filename, "r", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
decorator_before = None
errors = []
for i, line in enumerate(lines):
search = _re_decorator.search(line)
if search is not None:
decorator_name = search.groups()[0]
if decorator_before is not None and decorator_name.startswith("parameterized"):
errors.append(i)
decorator_before = decorator_name
elif decorator_before is not None:
decorator_before = None
return errors
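# Hedged usage sketch (not part of check_repo.py): write a tiny test file in which a
# `parameterized` decorator comes after another decorator; the exact matching depends on the
# module-level `_re_decorator` regex, so treat the expected result as illustrative only.
import os
import tempfile
bad_test = "@slow\n@parameterized.expand([(1,)])\ndef test_foo(self, x):\n    pass\n"
with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
    tmp.write(bad_test)
offending_lines = check_decorator_order(tmp.name)  # expected to contain 1, the `@parameterized` line index
os.remove(tmp.name)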
|
Check that in a given test file, the slow decorator is always last.
Args:
filename (`str`): The path to a test file to check.
Returns:
`List[int]`: The list of failures as a list of indices where there are problems.
|
check_decorator_order
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def check_all_decorator_order():
"""Check that in all test files, the slow decorator is always last."""
errors = []
for fname in os.listdir(PATH_TO_TESTS):
if fname.endswith(".py"):
filename = os.path.join(PATH_TO_TESTS, fname)
new_errors = check_decorator_order(filename)
errors += [f"- {filename}, line {i}" for i in new_errors]
if len(errors) > 0:
msg = "\n".join(errors)
raise ValueError(
"The parameterized decorator (and its variants) should always be first, but this is not the case in the"
f" following files:\n{msg}"
)
|
Check that in all test files, the slow decorator is always last.
|
check_all_decorator_order
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def find_all_documented_objects() -> List[str]:
"""
Parse the content of all doc files to detect which classes and functions it documents.
Returns:
`List[str]`: The list of all object names being documented.
`Dict[str, List[str]]`: A dictionary mapping the object name (full import path, e.g.
`integrations.PeftAdapterMixin`) to its documented methods
"""
documented_obj = []
documented_methods_map = {}
for doc_file in Path(PATH_TO_DOC).glob("**/*.md"):
with open(doc_file, "r", encoding="utf-8", newline="\n") as f:
content = f.read()
raw_doc_objs = re.findall(r"\[\[autodoc\]\]\s+(\S+)\s+", content)
documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs]
for obj in raw_doc_objs:
obj_public_methods = re.findall(rf"\[\[autodoc\]\] {obj}((\n\s+-.*)+)", content)
# Some objects have no methods documented
if len(obj_public_methods) == 0:
continue
else:
documented_methods_map[obj] = re.findall(r"(?<=-\s).*", obj_public_methods[0][0])
return documented_obj, documented_methods_map
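# Hedged sketch (not in the source) of what the two regexes above extract from a doc page that
# contains an `[[autodoc]]` directive followed by an indented method list.
import re
sample = "[[autodoc]] BertModel\n    - forward\n"
objs = re.findall(r"\[\[autodoc\]\]\s+(\S+)\s+", sample)                # ['BertModel']
blocks = re.findall(r"\[\[autodoc\]\] BertModel((\n\s+-.*)+)", sample)
methods = re.findall(r"(?<=-\s).*", blocks[0][0])                       # ['forward']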
|
Parse the content of all doc files to detect which classes and functions it documents.
Returns:
`List[str]`: The list of all object names being documented.
`Dict[str, List[str]]`: A dictionary mapping the object name (full import path, e.g.
`integrations.PeftAdapterMixin`) to its documented methods
|
find_all_documented_objects
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def ignore_undocumented(name: str) -> bool:
"""Rules to determine if `name` should be undocumented (returns `True` if it should not be documented)."""
# NOT DOCUMENTED ON PURPOSE.
# Constants uppercase are not documented.
if name.isupper():
return True
# PreTrainedModels / Encoders / Decoders / Layers / Embeddings / Attention are not documented.
if (
name.endswith("PreTrainedModel")
or name.endswith("Decoder")
or name.endswith("Encoder")
or name.endswith("Layer")
or name.endswith("Embeddings")
or name.endswith("Attention")
or name.endswith("OnnxConfig")
):
return True
# Submodules are not documented.
if os.path.isdir(os.path.join(PATH_TO_TRANSFORMERS, name)) or os.path.isfile(
os.path.join(PATH_TO_TRANSFORMERS, f"{name}.py")
):
return True
# All load functions are not documented.
if name.startswith("load_tf") or name.startswith("load_pytorch"):
return True
# is_xxx_available functions are not documented.
if name.startswith("is_") and name.endswith("_available"):
return True
# Deprecated objects are not documented.
if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS:
return True
# MMBT model does not really work.
if name.startswith("MMBT"):
return True
if name in SHOULD_HAVE_THEIR_OWN_PAGE:
return True
return False
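# Hedged illustration (not in the source) of the rules above; a plain public class name that
# matches none of them falls through and is expected to be documented (the function returns False).
assert ignore_undocumented("MODEL_MAPPING") is True          # upper-case constant
assert ignore_undocumented("BertPreTrainedModel") is True    # *PreTrainedModel suffix
assert ignore_undocumented("is_torch_available") is True     # is_*_available helper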
|
Rules to determine if `name` should be undocumented (returns `True` if it should not be documented).
|
ignore_undocumented
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def check_all_objects_are_documented():
"""Check all models are properly documented."""
documented_objs, documented_methods_map = find_all_documented_objects()
modules = transformers._modules
objects = [c for c in dir(transformers) if c not in modules and not c.startswith("_")]
undocumented_objs = [c for c in objects if c not in documented_objs and not ignore_undocumented(c)]
if len(undocumented_objs) > 0:
raise Exception(
"The following objects are in the public init, but not in the docs:\n - " + "\n - ".join(undocumented_objs)
)
check_model_type_doc_match()
check_public_method_exists(documented_methods_map)
|
Check all models are properly documented.
|
check_all_objects_are_documented
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def check_public_method_exists(documented_methods_map):
"""Check that all explicitly documented public methods are defined in the corresponding class."""
failures = []
for obj, methods in documented_methods_map.items():
# Let's ensure there is no repetition
if len(set(methods)) != len(methods):
failures.append(f"Error in the documentation of {obj}: there are repeated documented methods.")
# Navigates into the object, given the full import path
nested_path = obj.split(".")
submodule = transformers
if len(nested_path) > 1:
nested_submodules = nested_path[:-1]
for submodule_name in nested_submodules:
if submodule_name == "transformers":
continue
try:
submodule = getattr(submodule, submodule_name)
except AttributeError:
failures.append(f"Could not parse {submodule_name}. Are the required dependencies installed?")
continue
class_name = nested_path[-1]
try:
obj_class = getattr(submodule, class_name)
except AttributeError:
failures.append(f"Could not parse {class_name}. Are the required dependencies installed?")
continue
# Checks that all explicitly documented methods are defined in the class
for method in methods:
if method == "all": # Special keyword to document all public methods
continue
try:
if not hasattr(obj_class, method):
failures.append(
"The following public method is explicitly documented but not defined in the corresponding "
f"class. class: {obj}, method: {method}. If the method is defined, this error can be due to "
f"lacking dependencies."
)
except ImportError:
pass
if len(failures) > 0:
raise Exception("\n".join(failures))
|
Check that all explicitly documented public methods are defined in the corresponding class.
|
check_public_method_exists
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def check_model_type_doc_match():
"""Check all doc pages have a corresponding model type."""
model_doc_folder = Path(PATH_TO_DOC) / "model_doc"
model_docs = [m.stem for m in model_doc_folder.glob("*.md")]
model_types = list(transformers.models.auto.configuration_auto.MODEL_NAMES_MAPPING.keys())
model_types = [MODEL_TYPE_TO_DOC_MAPPING[m] if m in MODEL_TYPE_TO_DOC_MAPPING else m for m in model_types]
errors = []
for m in model_docs:
if m not in model_types and m != "auto":
close_matches = get_close_matches(m, model_types)
error_message = f"{m} is not a proper model identifier."
if len(close_matches) > 0:
close_matches = "/".join(close_matches)
error_message += f" Did you mean {close_matches}?"
errors.append(error_message)
if len(errors) > 0:
raise ValueError(
"Some model doc pages do not match any existing model type:\n"
+ "\n".join(errors)
+ "\nYou can add any missing model type to the `MODEL_NAMES_MAPPING` constant in "
"models/auto/configuration_auto.py."
)
|
Check all doc pages have a corresponding model type.
|
check_model_type_doc_match
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def check_deprecated_constant_is_up_to_date():
"""
Check if the constant `DEPRECATED_MODELS` in `models/auto/configuration_auto.py` is up to date.
"""
deprecated_folder = os.path.join(PATH_TO_TRANSFORMERS, "models", "deprecated")
deprecated_models = [m for m in os.listdir(deprecated_folder) if not m.startswith("_")]
constant_to_check = transformers.models.auto.configuration_auto.DEPRECATED_MODELS
message = []
missing_models = sorted(set(deprecated_models) - set(constant_to_check))
if len(missing_models) != 0:
missing_models = ", ".join(missing_models)
message.append(
"The following models are in the deprecated folder, make sure to add them to `DEPRECATED_MODELS` in "
f"`models/auto/configuration_auto.py`: {missing_models}."
)
extra_models = sorted(set(constant_to_check) - set(deprecated_models))
if len(extra_models) != 0:
extra_models = ", ".join(extra_models)
message.append(
"The following models are in the `DEPRECATED_MODELS` constant but not in the deprecated folder. Either "
f"remove them from the constant or move to the deprecated folder: {extra_models}."
)
if len(message) > 0:
raise Exception("\n".join(message))
|
Check if the constant `DEPRECATED_MODELS` in `models/auto/configuration_auto.py` is up to date.
|
check_deprecated_constant_is_up_to_date
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def check_repo_quality():
"""Check all models are tested and documented."""
print("Repository-wide checks:")
print(" - checking all models are included.")
check_model_list()
print(" - checking all models are public.")
check_models_are_in_init()
print(" - checking all models have tests.")
check_all_decorator_order()
check_all_models_are_tested()
print(" - checking all objects have documentation.")
check_all_objects_are_documented()
print(" - checking all models are in at least one auto class.")
check_all_models_are_auto_configured()
print(" - checking all names in auto name mappings are defined.")
check_all_auto_object_names_being_defined()
print(" - checking all keys in auto name mappings are defined in `CONFIG_MAPPING_NAMES`.")
check_all_auto_mapping_names_in_config_mapping_names()
print(" - checking all auto mappings could be imported.")
check_all_auto_mappings_importable()
print(" - checking all objects are equally (across frameworks) in the main __init__.")
check_objects_being_equally_in_main_init()
print(" - checking the DEPRECATED_MODELS constant is up to date.")
check_deprecated_constant_is_up_to_date()
|
Check all models are tested and documented.
|
check_repo_quality
|
python
|
huggingface/transformers
|
utils/check_repo.py
|
https://github.com/huggingface/transformers/blob/master/utils/check_repo.py
|
Apache-2.0
|
def find_priority_list(py_files):
"""
Given a list of modular files, sorts them by topological order. Modular models that DON'T depend on other modular
models will be higher in the topological order.
Args:
py_files: List of paths to the modular files
Returns:
A tuple with the ordered files (list) and their dependencies (dict)
"""
dependencies = map_dependencies(py_files)
ordered_files = topological_sort(dependencies)
return ordered_files, dependencies
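# Hedged usage sketch (not in the source); the paths below are hypothetical and the actual
# ordering comes from `map_dependencies` / `topological_sort` defined in the same module.
modular_files = [
    "src/transformers/models/foo/modular_foo.py",
    "src/transformers/models/bar/modular_bar.py",
]
ordered_files, dependencies = find_priority_list(modular_files)
# Files whose modular definitions depend on no other modular file come first in `ordered_files`.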
|
Given a list of modular files, sorts them by topological order. Modular models that DON'T depend on other modular
models will be higher in the topological order.
Args:
py_files: List of paths to the modular files
Returns:
A tuple with the ordered files (list) and their dependencies (dict)
|
find_priority_list
|
python
|
huggingface/transformers
|
utils/create_dependency_mapping.py
|
https://github.com/huggingface/transformers/blob/master/utils/create_dependency_mapping.py
|
Apache-2.0
|
def get_processor_types_from_config_class(config_class, allowed_mappings=None):
"""Return a tuple of processors for `config_class`.
We use `tuple` here to include (potentially) both slow & fast tokenizers.
"""
# To make a uniform return type
def _to_tuple(x):
if not isinstance(x, collections.abc.Sequence):
x = (x,)
else:
x = tuple(x)
return x
if allowed_mappings is None:
allowed_mappings = ["processor", "tokenizer", "image_processor", "feature_extractor"]
processor_types = ()
# Check first if a model has `ProcessorMixin`. Otherwise, check if it has tokenizers, and/or an image processor or
# a feature extractor
if config_class in PROCESSOR_MAPPING and "processor" in allowed_mappings:
processor_types = _to_tuple(PROCESSOR_MAPPING[config_class])
else:
if config_class in TOKENIZER_MAPPING and "tokenizer" in allowed_mappings:
processor_types = TOKENIZER_MAPPING[config_class]
if config_class in IMAGE_PROCESSOR_MAPPING and "image_processor" in allowed_mappings:
processor_types += _to_tuple(IMAGE_PROCESSOR_MAPPING[config_class])
elif config_class in FEATURE_EXTRACTOR_MAPPING and "feature_extractor" in allowed_mappings:
processor_types += _to_tuple(FEATURE_EXTRACTOR_MAPPING[config_class])
# Remark: some configurations have no processor at all. For example, generic composite models like
    # `EncoderDecoderModel` can be used with any (compatible) text models. Also, `DecisionTransformer` doesn't
# require any processor.
# We might get `None` for some tokenizers - remove them here.
processor_types = tuple(p for p in processor_types if p is not None)
return processor_types
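# Hedged usage sketch (not in the source); the exact tuple depends on the auto mappings of the
# installed version, but for a text-only model it is typically the slow/fast tokenizer pair.
from transformers import BertConfig
processor_types = get_processor_types_from_config_class(BertConfig)
tokenizer_only = get_processor_types_from_config_class(BertConfig, allowed_mappings=["tokenizer"])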
|
Return a tuple of processors for `config_class`.
We use `tuple` here to include (potentially) both slow & fast tokenizers.
|
get_processor_types_from_config_class
|
python
|
huggingface/transformers
|
utils/create_dummy_models.py
|
https://github.com/huggingface/transformers/blob/master/utils/create_dummy_models.py
|
Apache-2.0
|
def get_architectures_from_config_class(config_class, arch_mappings, models_to_skip=None):
"""Return a tuple of all possible architectures attributed to a configuration class `config_class`.
For example, BertConfig -> [BertModel, BertForMaskedLM, ..., BertForQuestionAnswering].
"""
# A model architecture could appear in several mappings. For example, `BartForConditionalGeneration` is in
# - MODEL_FOR_PRETRAINING_MAPPING_NAMES
# - MODEL_WITH_LM_HEAD_MAPPING_NAMES
# - MODEL_FOR_MASKED_LM_MAPPING_NAMES
# - MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
# We avoid the duplication.
architectures = set()
if models_to_skip is None:
models_to_skip = []
models_to_skip = UNCONVERTIBLE_MODEL_ARCHITECTURES.union(models_to_skip)
for mapping in arch_mappings:
if config_class in mapping:
models = mapping[config_class]
models = tuple(models) if isinstance(models, collections.abc.Sequence) else (models,)
for model in models:
if model.__name__ not in models_to_skip:
architectures.add(model)
architectures = tuple(architectures)
return architectures
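# Hedged usage sketch (not in the source): `arch_mappings` are auto model mappings such as
# MODEL_MAPPING and MODEL_FOR_MASKED_LM_MAPPING; for `BertConfig` the result is expected to
# include BertModel and BertForMaskedLM, deduplicated across mappings.
from transformers import BertConfig
from transformers.models.auto.modeling_auto import MODEL_FOR_MASKED_LM_MAPPING, MODEL_MAPPING
architectures = get_architectures_from_config_class(BertConfig, [MODEL_MAPPING, MODEL_FOR_MASKED_LM_MAPPING])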
|
Return a tuple of all possible architectures attributed to a configuration class `config_class`.
For example, BertConfig -> [BertModel, BertForMaskedLM, ..., BertForQuestionAnswering].
|
get_architectures_from_config_class
|
python
|
huggingface/transformers
|
utils/create_dummy_models.py
|
https://github.com/huggingface/transformers/blob/master/utils/create_dummy_models.py
|
Apache-2.0
|
def get_config_class_from_processor_class(processor_class):
"""Get the config class from a processor class.
Some config/model classes use tokenizers/feature_extractors from other models. For example, `GPT-J` uses
`GPT2Tokenizer`. If no checkpoint is found for a config class, or a checkpoint is found without necessary file(s) to
create the processor for `processor_class`, we get the config class that corresponds to `processor_class` and use it
to find a checkpoint in order to create the processor.
"""
processor_prefix = processor_class.__name__
for postfix in ["TokenizerFast", "Tokenizer", "ImageProcessor", "FeatureExtractor", "Processor"]:
processor_prefix = processor_prefix.replace(postfix, "")
# `Wav2Vec2CTCTokenizer` -> `Wav2Vec2Config`
if processor_prefix == "Wav2Vec2CTC":
processor_prefix = "Wav2Vec2"
# Find the new configuration class
new_config_name = f"{processor_prefix}Config"
new_config_class = getattr(transformers_module, new_config_name)
return new_config_class
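# Hedged illustration (not in the source): the config class is derived purely from the processor
# class name, e.g. GPT2TokenizerFast -> GPT2Config, ViTImageProcessor -> ViTConfig, plus the
# Wav2Vec2CTCTokenizer -> Wav2Vec2Config special case handled above.
from transformers import GPT2TokenizerFast
assert get_config_class_from_processor_class(GPT2TokenizerFast).__name__ == "GPT2Config"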
|
Get the config class from a processor class.
Some config/model classes use tokenizers/feature_extractors from other models. For example, `GPT-J` uses
`GPT2Tokenizer`. If no checkpoint is found for a config class, or a checkpoint is found without necessary file(s) to
create the processor for `processor_class`, we get the config class that corresponds to `processor_class` and use it
to find a checkpoint in order to create the processor.
|
get_config_class_from_processor_class
|
python
|
huggingface/transformers
|
utils/create_dummy_models.py
|
https://github.com/huggingface/transformers/blob/master/utils/create_dummy_models.py
|
Apache-2.0
|
def build_processor(config_class, processor_class, allow_no_checkpoint=False):
"""Create a processor for `processor_class`.
If a processor is not able to be built with the original arguments, this method tries to change the arguments and
call itself recursively, by inferring a new `config_class` or a new `processor_class` from another one, in order to
find a checkpoint containing the necessary files to build a processor.
The processor is not saved here. Instead, it will be saved in `convert_processors` after further changes in
    `convert_processors`. For each model architecture, a copy will be created and saved alongside the built model.
"""
# Currently, this solely uses the docstring in the source file of `config_class` to find a checkpoint.
checkpoint = get_checkpoint_from_config_class(config_class)
if checkpoint is None:
# try to get the checkpoint from the config class for `processor_class`.
# This helps cases like `XCLIPConfig` and `VideoMAEFeatureExtractor` to find a checkpoint from `VideoMAEConfig`.
config_class_from_processor_class = get_config_class_from_processor_class(processor_class)
checkpoint = get_checkpoint_from_config_class(config_class_from_processor_class)
processor = None
try:
processor = processor_class.from_pretrained(checkpoint)
except Exception as e:
logger.error(f"{e.__class__.__name__}: {e}")
# Try to get a new processor class from checkpoint. This is helpful for a checkpoint without necessary file to load
# processor while `processor_class` is an Auto class. For example, `sew` has `Wav2Vec2Processor` in
# `PROCESSOR_MAPPING_NAMES`, its `tokenizer_class` is `AutoTokenizer`, and the checkpoint
# `https://huggingface.co/asapp/sew-tiny-100k` has no tokenizer file, but we can get
# `tokenizer_class: Wav2Vec2CTCTokenizer` from the config file. (The new processor class won't be able to load from
# `checkpoint`, but it helps this recursive method to find a way to build a processor).
if (
processor is None
and checkpoint is not None
and issubclass(processor_class, (PreTrainedTokenizerBase, AutoTokenizer))
):
try:
config = AutoConfig.from_pretrained(checkpoint)
except Exception as e:
logger.error(f"{e.__class__.__name__}: {e}")
config = None
if config is not None:
if not isinstance(config, config_class):
raise ValueError(
f"`config` (which is of type {config.__class__.__name__}) should be an instance of `config_class`"
f" ({config_class.__name__})!"
)
tokenizer_class = config.tokenizer_class
new_processor_class = None
if tokenizer_class is not None:
new_processor_class = getattr(transformers_module, tokenizer_class)
if new_processor_class != processor_class:
processor = build_processor(config_class, new_processor_class)
            # If `tokenizer_class` is not specified in `config`, let's use `config` to get the processor class via the
            # auto mappings, but only allow the tokenizer mapping to be used. This is to make `Wav2Vec2Conformer` build a processor.
if processor is None:
new_processor_classes = get_processor_types_from_config_class(
config.__class__, allowed_mappings=["tokenizer"]
)
# Used to avoid infinite recursion between a pair of fast/slow tokenizer types
names = [
x.__name__.replace("Fast", "") for x in [processor_class, new_processor_class] if x is not None
]
new_processor_classes = [
x for x in new_processor_classes if x is not None and x.__name__.replace("Fast", "") not in names
]
if len(new_processor_classes) > 0:
new_processor_class = new_processor_classes[0]
# Let's use fast tokenizer if there is any
for x in new_processor_classes:
if x.__name__.endswith("Fast"):
new_processor_class = x
break
processor = build_processor(config_class, new_processor_class)
if processor is None:
# Try to build each component (tokenizer & feature extractor) of a `ProcessorMixin`.
if issubclass(processor_class, ProcessorMixin):
attrs = {}
for attr_name in processor_class.attributes:
attrs[attr_name] = []
# This could be a tuple (for tokenizers). For example, `CLIPProcessor` has
# - feature_extractor_class = "CLIPFeatureExtractor"
# - tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
attr_class_names = getattr(processor_class, f"{attr_name}_class")
if not isinstance(attr_class_names, tuple):
attr_class_names = (attr_class_names,)
for name in attr_class_names:
attr_class = getattr(transformers_module, name)
attr = build_processor(config_class, attr_class)
if attr is not None:
attrs[attr_name].append(attr)
# try to build a `ProcessorMixin`, so we can return a single value
if all(len(v) > 0 for v in attrs.values()):
try:
processor = processor_class(**{k: v[0] for k, v in attrs.items()})
except Exception as e:
logger.error(f"{e.__class__.__name__}: {e}")
else:
# `checkpoint` might lack some file(s) to load a processor. For example, `facebook/hubert-base-ls960`
# has no tokenizer file to load `Wav2Vec2CTCTokenizer`. In this case, we try to build a processor
# with the configuration class (for example, `Wav2Vec2Config`) corresponding to `processor_class`.
config_class_from_processor_class = get_config_class_from_processor_class(processor_class)
if config_class_from_processor_class != config_class:
processor = build_processor(config_class_from_processor_class, processor_class)
# Try to create an image processor or a feature extractor without any checkpoint
if (
processor is None
and allow_no_checkpoint
and (issubclass(processor_class, BaseImageProcessor) or issubclass(processor_class, FeatureExtractionMixin))
):
try:
processor = processor_class()
except Exception as e:
logger.error(f"{e.__class__.__name__}: {e}")
# validation
if processor is not None:
if not (isinstance(processor, processor_class) or processor_class.__name__.startswith("Auto")):
raise ValueError(
f"`processor` (which is of type {processor.__class__.__name__}) should be an instance of"
f" {processor_class.__name__} or an Auto class!"
)
return processor
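# Hedged usage sketch (not in the source): building a tokenizer for BERT from whatever checkpoint
# is referenced in the `BertConfig` docstring, with the fallback strategies described above.
from transformers import BertConfig, BertTokenizerFast
tokenizer = build_processor(BertConfig, BertTokenizerFast, allow_no_checkpoint=False)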
|
Create a processor for `processor_class`.
If a processor is not able to be built with the original arguments, this method tries to change the arguments and
call itself recursively, by inferring a new `config_class` or a new `processor_class` from another one, in order to
find a checkpoint containing the necessary files to build a processor.
The processor is not saved here. Instead, it will be saved in `convert_processors` after further changes in
`convert_processors`. For each model architecture, a copy will be created and saved alongside the built model.
|
build_processor
|
python
|
huggingface/transformers
|
utils/create_dummy_models.py
|
https://github.com/huggingface/transformers/blob/master/utils/create_dummy_models.py
|
Apache-2.0
|
def get_tiny_config(config_class, model_class=None, **model_tester_kwargs):
"""Retrieve a tiny configuration from `config_class` using each model's `ModelTester`.
Args:
config_class: Subclass of `PreTrainedConfig`.
Returns:
An instance of `config_class` with tiny hyperparameters
"""
model_type = config_class.model_type
# For model type like `data2vec-vision` and `donut-swin`, we can't get the config/model file name directly via
# `model_type` as it would be sth. like `configuration_data2vec_vision.py`.
# A simple way is to use `inspect.getsourcefile(config_class)`.
config_source_file = inspect.getsourcefile(config_class)
# The modeling file name without prefix (`modeling_`) and postfix (`.py`)
modeling_name = config_source_file.split(os.path.sep)[-1].replace("configuration_", "").replace(".py", "")
try:
print("Importing", model_type_to_module_name(model_type))
module_name = model_type_to_module_name(model_type)
if not modeling_name.startswith(module_name):
raise ValueError(f"{modeling_name} doesn't start with {module_name}!")
test_file = os.path.join("tests", "models", module_name, f"test_modeling_{modeling_name}.py")
models_to_model_testers = get_model_to_tester_mapping(test_file)
# Find the model tester class
model_tester_class = None
tester_classes = []
if model_class is not None:
tester_classes = get_tester_classes_for_model(test_file, model_class)
else:
for _tester_classes in models_to_model_testers.values():
tester_classes.extend(_tester_classes)
if len(tester_classes) > 0:
# sort with the length of the class names first, then the alphabetical order
# This is to avoid `T5EncoderOnlyModelTest` is used instead of `T5ModelTest`, which has
            # `is_encoder_decoder=False` and causes some pipeline tests to fail (also failures in the `Optimum` CI).
# TODO: More fine grained control of the desired tester class.
model_tester_class = sorted(tester_classes, key=lambda x: (len(x.__name__), x.__name__))[0]
except ModuleNotFoundError:
error = f"Tiny config not created for {model_type} - cannot find the testing module from the model name."
raise ValueError(error)
if model_tester_class is None:
error = f"Tiny config not created for {model_type} - no model tester is found in the testing module."
raise ValueError(error)
# CLIP-like models have `text_model_tester` and `vision_model_tester`, and we need to pass `vocab_size` to
# `text_model_tester` via `text_kwargs`. The same trick is also necessary for `Flava`.
if "vocab_size" in model_tester_kwargs:
if "text_kwargs" in inspect.signature(model_tester_class.__init__).parameters.keys():
vocab_size = model_tester_kwargs.pop("vocab_size")
model_tester_kwargs["text_kwargs"] = {"vocab_size": vocab_size}
# `parent` is an instance of `unittest.TestCase`, but we don't need it here.
model_tester = model_tester_class(parent=None, **model_tester_kwargs)
if hasattr(model_tester, "get_pipeline_config"):
config = model_tester.get_pipeline_config()
elif hasattr(model_tester, "prepare_config_and_inputs"):
# `PoolFormer` has no `get_config` defined. Furthermore, it's better to use `prepare_config_and_inputs` even if
# `get_config` is defined, since there might be some extra changes in `prepare_config_and_inputs`.
config = model_tester.prepare_config_and_inputs()[0]
elif hasattr(model_tester, "get_config"):
config = model_tester.get_config()
else:
error = (
f"Tiny config not created for {model_type} - the model tester {model_tester_class.__name__} lacks"
" necessary method to create config."
)
raise ValueError(error)
# make sure this is long enough (some model tester has `20` for this attr.) to pass `text-generation`
# pipeline tests.
max_positions = []
for key in ["max_position_embeddings", "max_source_positions", "max_target_positions"]:
if getattr(config, key, 0) > 0:
max_positions.append(getattr(config, key))
if getattr(config, "text_config", None) is not None:
if getattr(config.text_config, key, None) is not None:
max_positions.append(getattr(config.text_config, key))
if len(max_positions) > 0:
max_position = max(200, min(max_positions))
for key in ["max_position_embeddings", "max_source_positions", "max_target_positions"]:
if getattr(config, key, 0) > 0:
setattr(config, key, max_position)
if getattr(config, "text_config", None) is not None:
if getattr(config.text_config, key, None) is not None:
setattr(config.text_config, key, max_position)
return config
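# Hedged usage sketch (not in the source); this assumes the script is run from the repository root
# so that the corresponding `tests/models/...` testing module can be located and imported.
from transformers import BertConfig
tiny_config = get_tiny_config(BertConfig)
# `tiny_config` carries the very small hyperparameters defined by Bert's ModelTester, with the
# max position attributes clamped as described above.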
|
Retrieve a tiny configuration from `config_class` using each model's `ModelTester`.
Args:
config_class: Subclass of `PreTrainedConfig`.
Returns:
An instance of `config_class` with tiny hyperparameters
|
get_tiny_config
|
python
|
huggingface/transformers
|
utils/create_dummy_models.py
|
https://github.com/huggingface/transformers/blob/master/utils/create_dummy_models.py
|
Apache-2.0
|
def convert_processors(processors, tiny_config, output_folder, result):
"""Change a processor to work with smaller inputs.
For tokenizers, we try to reduce their vocabulary size.
    For feature extractors, we use a smaller image size or change
other attributes using the values from `tiny_config`. See `convert_feature_extractor`.
This method should not fail: we catch the errors and put them in `result["warnings"]` with descriptive messages.
"""
def _sanity_check(fast_tokenizer, slow_tokenizer, keep_fast_tokenizer=False):
"""Set tokenizer(s) to `None` if the fast/slow tokenizers have different values for `vocab_size` or `length`.
If `keep_fast_tokenizer=True`, the fast tokenizer will be kept.
"""
# sanity check 1: fast and slow tokenizers should be compatible (vocab_size)
if fast_tokenizer is not None and slow_tokenizer is not None:
if fast_tokenizer.vocab_size != slow_tokenizer.vocab_size:
warning_message = (
"The fast/slow tokenizers "
f"({fast_tokenizer.__class__.__name__}/{slow_tokenizer.__class__.__name__}) have different "
"vocabulary size: "
f"fast_tokenizer.vocab_size = {fast_tokenizer.vocab_size} and "
f"slow_tokenizer.vocab_size = {slow_tokenizer.vocab_size}."
)
result["warnings"].append(warning_message)
if not keep_fast_tokenizer:
fast_tokenizer = None
slow_tokenizer = None
# sanity check 2: fast and slow tokenizers should be compatible (length)
if fast_tokenizer is not None and slow_tokenizer is not None:
if len(fast_tokenizer) != len(slow_tokenizer):
warning_message = (
f"The fast/slow tokenizers () have different length: "
f"len(fast_tokenizer) = {len(fast_tokenizer)} and "
f"len(slow_tokenizer) = {len(slow_tokenizer)}."
)
result["warnings"].append(warning_message)
if not keep_fast_tokenizer:
fast_tokenizer = None
slow_tokenizer = None
return fast_tokenizer, slow_tokenizer
tokenizers = []
feature_extractors = []
for processor in processors:
if isinstance(processor, PreTrainedTokenizerBase):
if processor.__class__.__name__ not in {x.__class__.__name__ for x in tokenizers}:
tokenizers.append(processor)
elif isinstance(processor, BaseImageProcessor):
if processor.__class__.__name__ not in {x.__class__.__name__ for x in feature_extractors}:
feature_extractors.append(processor)
elif isinstance(processor, FeatureExtractionMixin):
if processor.__class__.__name__ not in {x.__class__.__name__ for x in feature_extractors}:
feature_extractors.append(processor)
elif isinstance(processor, ProcessorMixin):
if hasattr(processor, "tokenizer"):
if processor.tokenizer.__class__.__name__ not in {x.__class__.__name__ for x in tokenizers}:
tokenizers.append(processor.tokenizer)
# Currently, we only have these 2 possibilities
if hasattr(processor, "image_processor"):
if processor.image_processor.__class__.__name__ not in {
x.__class__.__name__ for x in feature_extractors
}:
feature_extractors.append(processor.image_processor)
elif hasattr(processor, "feature_extractor"):
if processor.feature_extractor.__class__.__name__ not in {
x.__class__.__name__ for x in feature_extractors
}:
feature_extractors.append(processor.feature_extractor)
# check the built processors have the unique type
num_types = len({x.__class__.__name__ for x in feature_extractors})
if num_types >= 2:
raise ValueError(f"`feature_extractors` should contain at most 1 type, but it contains {num_types} types!")
num_types = len({x.__class__.__name__.replace("Fast", "") for x in tokenizers})
if num_types >= 2:
raise ValueError(f"`tokenizers` should contain at most 1 tokenizer type, but it contains {num_types} types!")
fast_tokenizer = None
slow_tokenizer = None
for tokenizer in tokenizers:
if isinstance(tokenizer, PreTrainedTokenizerFast):
fast_tokenizer = tokenizer
else:
slow_tokenizer = tokenizer
# If the (original) fast/slow tokenizers don't correspond, keep only the fast tokenizer.
    # This doesn't necessarily imply that the fast/slow tokenizers in a single Hub repo have issues.
# It's more of an issue in `build_processor` which tries to get a checkpoint with as much effort as possible.
# For `YosoModel` (which uses `AlbertTokenizer(Fast)`), its real (Hub) checkpoint doesn't contain valid files to
# load the slower tokenizer (`AlbertTokenizer`), and it ends up finding the (canonical) checkpoint of `AlbertModel`,
# which has different vocabulary.
# TODO: Try to improve `build_processor`'s definition and/or usage to avoid the above situation in the first place.
fast_tokenizer, slow_tokenizer = _sanity_check(fast_tokenizer, slow_tokenizer, keep_fast_tokenizer=True)
original_fast_tokenizer, original_slow_tokenizer = fast_tokenizer, slow_tokenizer
if fast_tokenizer:
try:
            # Wav2Vec2ForCTC, ByT5Tokenizer, etc. are already small enough and have no fast version that can
            # be retrained.
if fast_tokenizer.vocab_size > TARGET_VOCAB_SIZE:
fast_tokenizer = convert_tokenizer(fast_tokenizer)
except Exception:
result["warnings"].append(
(
f"Failed to convert the fast tokenizer for {fast_tokenizer.__class__.__name__}.",
traceback.format_exc(),
)
)
# If `fast_tokenizer` exists, `slow_tokenizer` should correspond to it.
if fast_tokenizer:
# Make sure the fast tokenizer can be saved
try:
# We don't save it to `output_folder` at this moment - only at the end of this function.
with tempfile.TemporaryDirectory() as tmpdir:
fast_tokenizer.save_pretrained(tmpdir)
try:
slow_tokenizer = AutoTokenizer.from_pretrained(tmpdir, use_fast=False)
except Exception:
result["warnings"].append(
(
f"Failed to load the slow tokenizer saved from {fast_tokenizer.__class__.__name__}.",
traceback.format_exc(),
)
)
# Let's just keep the fast version
slow_tokenizer = None
except Exception:
result["warnings"].append(
(
f"Failed to save the fast tokenizer for {fast_tokenizer.__class__.__name__}.",
traceback.format_exc(),
)
)
fast_tokenizer = None
# If the (possibly converted) fast/slow tokenizers don't correspond, set them to `None`, and use the original
# tokenizers.
fast_tokenizer, slow_tokenizer = _sanity_check(fast_tokenizer, slow_tokenizer, keep_fast_tokenizer=False)
# If there is any conversion failed, we keep the original tokenizers.
if (original_fast_tokenizer is not None and fast_tokenizer is None) or (
original_slow_tokenizer is not None and slow_tokenizer is None
):
        warning_message = (
            "There are some issues when converting the fast/slow tokenizers. The original tokenizers from the Hub "
            "will be used instead."
        )
        result["warnings"].append(warning_message)
# Let's use the original version at the end (`original_fast_tokenizer` and `original_slow_tokenizer`)
fast_tokenizer = original_fast_tokenizer
slow_tokenizer = original_slow_tokenizer
# Make sure the fast tokenizer can be saved
if fast_tokenizer:
# We don't save it to `output_folder` at this moment - only at the end of this function.
with tempfile.TemporaryDirectory() as tmpdir:
try:
fast_tokenizer.save_pretrained(tmpdir)
except Exception:
result["warnings"].append(
(
f"Failed to save the fast tokenizer for {fast_tokenizer.__class__.__name__}.",
traceback.format_exc(),
)
)
fast_tokenizer = None
# Make sure the slow tokenizer can be saved
if slow_tokenizer:
# We don't save it to `output_folder` at this moment - only at the end of this function.
with tempfile.TemporaryDirectory() as tmpdir:
try:
slow_tokenizer.save_pretrained(tmpdir)
except Exception:
result["warnings"].append(
(
f"Failed to save the slow tokenizer for {slow_tokenizer.__class__.__name__}.",
traceback.format_exc(),
)
)
slow_tokenizer = None
# update feature extractors using the tiny config
try:
feature_extractors = [convert_feature_extractor(p, tiny_config) for p in feature_extractors]
except Exception:
result["warnings"].append(
(
"Failed to convert feature extractors.",
traceback.format_exc(),
)
)
feature_extractors = []
if hasattr(tiny_config, "max_position_embeddings") and tiny_config.max_position_embeddings > 0:
if fast_tokenizer is not None:
if fast_tokenizer.__class__.__name__ in [
"RobertaTokenizerFast",
"XLMRobertaTokenizerFast",
"LongformerTokenizerFast",
"MPNetTokenizerFast",
]:
fast_tokenizer.model_max_length = tiny_config.max_position_embeddings - 2
else:
fast_tokenizer.model_max_length = tiny_config.max_position_embeddings
if slow_tokenizer is not None:
if slow_tokenizer.__class__.__name__ in [
"RobertaTokenizer",
"XLMRobertaTokenizer",
"LongformerTokenizer",
"MPNetTokenizer",
]:
slow_tokenizer.model_max_length = tiny_config.max_position_embeddings - 2
else:
slow_tokenizer.model_max_length = tiny_config.max_position_embeddings
processors = [fast_tokenizer, slow_tokenizer] + feature_extractors
processors = [p for p in processors if p is not None]
for p in processors:
p.save_pretrained(output_folder)
return processors
|
Change a processor to work with smaller inputs.
For tokenizers, we try to reduce their vocabulary size.
For feature extractors, we use a smaller image size or change
other attributes using the values from `tiny_config`. See `convert_feature_extractor`.
This method should not fail: we catch the errors and put them in `result["warnings"]` with descriptive messages.
|
convert_processors
|
python
|
huggingface/transformers
|
utils/create_dummy_models.py
|
https://github.com/huggingface/transformers/blob/master/utils/create_dummy_models.py
|
Apache-2.0
|
def _sanity_check(fast_tokenizer, slow_tokenizer, keep_fast_tokenizer=False):
"""Set tokenizer(s) to `None` if the fast/slow tokenizers have different values for `vocab_size` or `length`.
If `keep_fast_tokenizer=True`, the fast tokenizer will be kept.
"""
# sanity check 1: fast and slow tokenizers should be compatible (vocab_size)
if fast_tokenizer is not None and slow_tokenizer is not None:
if fast_tokenizer.vocab_size != slow_tokenizer.vocab_size:
warning_message = (
"The fast/slow tokenizers "
f"({fast_tokenizer.__class__.__name__}/{slow_tokenizer.__class__.__name__}) have different "
"vocabulary size: "
f"fast_tokenizer.vocab_size = {fast_tokenizer.vocab_size} and "
f"slow_tokenizer.vocab_size = {slow_tokenizer.vocab_size}."
)
result["warnings"].append(warning_message)
if not keep_fast_tokenizer:
fast_tokenizer = None
slow_tokenizer = None
# sanity check 2: fast and slow tokenizers should be compatible (length)
if fast_tokenizer is not None and slow_tokenizer is not None:
if len(fast_tokenizer) != len(slow_tokenizer):
warning_message = (
f"The fast/slow tokenizers () have different length: "
f"len(fast_tokenizer) = {len(fast_tokenizer)} and "
f"len(slow_tokenizer) = {len(slow_tokenizer)}."
)
result["warnings"].append(warning_message)
if not keep_fast_tokenizer:
fast_tokenizer = None
slow_tokenizer = None
return fast_tokenizer, slow_tokenizer
|
Set tokenizer(s) to `None` if the fast/slow tokenizers have different values for `vocab_size` or `length`.
If `keep_fast_tokenizer=True`, the fast tokenizer will be kept.
|
_sanity_check
|
python
|
huggingface/transformers
|
utils/create_dummy_models.py
|
https://github.com/huggingface/transformers/blob/master/utils/create_dummy_models.py
|
Apache-2.0
|
def get_checkpoint_dir(output_dir, model_arch):
"""Get framework-agnostic architecture name. Used to save all PT/TF/Flax models into the same directory."""
arch_name = model_arch.__name__
if arch_name.startswith("TF"):
arch_name = arch_name[2:]
elif arch_name.startswith("Flax"):
arch_name = arch_name[4:]
return os.path.join(output_dir, arch_name)
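# Hedged illustration (not in the source), using stand-in classes instead of the real transformers
# architectures: the TF/Flax prefixes are stripped so every framework shares one directory per architecture.
import os
class BertModel: ...
class TFBertModel: ...
class FlaxBertModel: ...
assert get_checkpoint_dir("out", TFBertModel) == os.path.join("out", "BertModel")
assert get_checkpoint_dir("out", FlaxBertModel) == get_checkpoint_dir("out", BertModel)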
|
Get framework-agnostic architecture name. Used to save all PT/TF/Flax models into the same directory.
|
get_checkpoint_dir
|
python
|
huggingface/transformers
|
utils/create_dummy_models.py
|
https://github.com/huggingface/transformers/blob/master/utils/create_dummy_models.py
|
Apache-2.0
|
def build_model(model_arch, tiny_config, output_dir):
"""Create and save a model for `model_arch`.
Also copy the set of processors to each model (under the same model type) output folder.
"""
checkpoint_dir = get_checkpoint_dir(output_dir, model_arch)
processor_output_dir = os.path.join(output_dir, "processors")
# copy the (same set of) processors (for a model type) to the model arch. specific folder
if os.path.isdir(processor_output_dir):
shutil.copytree(processor_output_dir, checkpoint_dir, dirs_exist_ok=True)
tiny_config = copy.deepcopy(tiny_config)
if any(model_arch.__name__.endswith(x) for x in ["ForCausalLM", "LMHeadModel"]):
tiny_config.is_encoder_decoder = False
tiny_config.is_decoder = True
model = model_arch(config=tiny_config)
model.save_pretrained(checkpoint_dir)
model.from_pretrained(checkpoint_dir)
return model
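# Hedged usage sketch (not in the source), reusing `get_tiny_config` from above; the output
# directory is hypothetical.
from transformers import BertConfig, BertForMaskedLM
tiny_config = get_tiny_config(BertConfig)
model = build_model(BertForMaskedLM, tiny_config, output_dir="tiny_models/bert")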
|
Create and save a model for `model_arch`.
Also copy the set of processors to each model (under the same model type) output folder.
|
build_model
|
python
|
huggingface/transformers
|
utils/create_dummy_models.py
|
https://github.com/huggingface/transformers/blob/master/utils/create_dummy_models.py
|
Apache-2.0
|
def fill_result_with_error(result, error, trace, models_to_create):
"""Fill `result` with errors for all target model arch if we can't build processor"""
error = (error, trace)
result["error"] = error
for framework in FRAMEWORKS:
if framework in models_to_create:
result[framework] = {}
for model_arch in models_to_create[framework]:
result[framework][model_arch.__name__] = {"model": None, "checkpoint": None, "error": error}
result["processor"] = {p.__class__.__name__: p.__class__.__name__ for p in result["processor"].values()}
|
Fill `result` with errors for all target model architectures if we can't build a processor
|
fill_result_with_error
|
python
|
huggingface/transformers
|
utils/create_dummy_models.py
|
https://github.com/huggingface/transformers/blob/master/utils/create_dummy_models.py
|
Apache-2.0
|
def get_token_id_from_tokenizer(token_id_name, tokenizer, original_token_id):
"""Use `tokenizer` to get the values of `bos_token_id`, `eos_token_ids`, etc.
The argument `token_id_name` should be a string ending with `_token_id`, and `original_token_id` should be an
    integer that will be returned if `tokenizer` has no token corresponding to `token_id_name`.
"""
token_id = original_token_id
if not token_id_name.endswith("_token_id"):
raise ValueError(f"`token_id_name` is {token_id_name}, which doesn't end with `_token_id`!")
token = getattr(tokenizer, token_id_name.replace("_token_id", "_token"), None)
if token is not None:
if isinstance(tokenizer, PreTrainedTokenizerFast):
token_id = tokenizer._convert_token_to_id_with_added_voc(token)
else:
token_id = tokenizer._convert_token_to_id(token)
return token_id
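# Hedged usage sketch (not in the source): read `eos_token_id` through a real tokenizer and fall
# back to the config's original value when the tokenizer defines no such token.
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("gpt2")
eos_token_id = get_token_id_from_tokenizer("eos_token_id", tokenizer, original_token_id=0)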
|
Use `tokenizer` to get the values of `bos_token_id`, `eos_token_ids`, etc.
The argument `token_id_name` should be a string ending with `_token_id`, and `original_token_id` should be an
integer that will be returned if `tokenizer` has no token corresponding to `token_id_name`.
|
get_token_id_from_tokenizer
|
python
|
huggingface/transformers
|
utils/create_dummy_models.py
|
https://github.com/huggingface/transformers/blob/master/utils/create_dummy_models.py
|
Apache-2.0
|
def build(config_class, models_to_create, output_dir):
"""Create all models for a certain model type.
Args:
config_class (`PretrainedConfig`):
A subclass of `PretrainedConfig` that is used to determine `models_to_create`.
models_to_create (`dict`):
A dictionary containing the processor/model classes that we want to create the instances. These models are
of the same model type which is associated to `config_class`.
output_dir (`str`):
The directory to save all the checkpoints. Each model architecture will be saved in a subdirectory under
it. Models in different frameworks with the same architecture will be saved in the same subdirectory.
"""
if data["training_ds"] is None or data["testing_ds"] is None:
ds = load_dataset("Salesforce/wikitext", "wikitext-2-raw-v1")
data["training_ds"] = ds["train"]
data["testing_ds"] = ds["test"]
if config_class.model_type in [
"encoder-decoder",
"vision-encoder-decoder",
"speech-encoder-decoder",
"vision-text-dual-encoder",
]:
return build_composite_models(config_class, output_dir)
result = {k: {} for k in models_to_create}
# These will be removed at the end if they are empty
result["error"] = None
result["warnings"] = []
# Build processors
processor_classes = models_to_create["processor"]
if len(processor_classes) == 0:
error = f"No processor class could be found in {config_class.__name__}."
fill_result_with_error(result, error, None, models_to_create)
logger.error(result["error"][0])
return result
for processor_class in processor_classes:
try:
processor = build_processor(config_class, processor_class, allow_no_checkpoint=True)
if processor is not None:
result["processor"][processor_class] = processor
except Exception:
error = f"Failed to build processor for {processor_class.__name__}."
trace = traceback.format_exc()
fill_result_with_error(result, error, trace, models_to_create)
logger.error(result["error"][0])
return result
if len(result["processor"]) == 0:
error = f"No processor could be built for {config_class.__name__}."
fill_result_with_error(result, error, None, models_to_create)
logger.error(result["error"][0])
return result
try:
tiny_config = get_tiny_config(config_class)
except Exception as e:
error = f"Failed to get tiny config for {config_class.__name__}: {e}"
trace = traceback.format_exc()
fill_result_with_error(result, error, trace, models_to_create)
logger.error(result["error"][0])
return result
# Convert the processors (reduce vocabulary size, smaller image size, etc.)
processors = list(result["processor"].values())
processor_output_folder = os.path.join(output_dir, "processors")
try:
processors = convert_processors(processors, tiny_config, processor_output_folder, result)
except Exception:
error = "Failed to convert the processors."
trace = traceback.format_exc()
result["warnings"].append((error, trace))
if len(processors) == 0:
error = f"No processor is returned by `convert_processors` for {config_class.__name__}."
fill_result_with_error(result, error, None, models_to_create)
logger.error(result["error"][0])
return result
try:
config_overrides = get_config_overrides(config_class, processors)
except Exception as e:
error = f"Failure occurs while calling `get_config_overrides`: {e}"
trace = traceback.format_exc()
fill_result_with_error(result, error, trace, models_to_create)
logger.error(result["error"][0])
return result
# Just for us to see this easily in the report
if "vocab_size" in config_overrides:
result["vocab_size"] = config_overrides["vocab_size"]
# Update attributes that `vocab_size` involves
for k, v in config_overrides.items():
if hasattr(tiny_config, k):
setattr(tiny_config, k, v)
# So far, we only have to deal with `text_config`, as `config_overrides` contains text-related attributes only.
# `FuyuConfig` saves data under both FuyuConfig and its `text_config`. This is not good, but let's just update
        # every involved field to avoid potential failures.
if (
hasattr(tiny_config, "text_config")
and tiny_config.text_config is not None
and hasattr(tiny_config.text_config, k)
):
setattr(tiny_config.text_config, k, v)
# If `text_config_dict` exists, we need to update its value here too in order to # make
# `save_pretrained -> from_pretrained` work.
if hasattr(tiny_config, "text_config_dict"):
tiny_config.text_config_dict[k] = v
if result["warnings"]:
logger.warning(result["warnings"][0][0])
# update `result["processor"]`
result["processor"] = {type(p).__name__: p.__class__.__name__ for p in processors}
for pytorch_arch in models_to_create["pytorch"]:
result["pytorch"][pytorch_arch.__name__] = {}
error = None
try:
model = build_model(pytorch_arch, tiny_config, output_dir=output_dir)
except Exception as e:
model = None
error = f"Failed to create the pytorch model for {pytorch_arch}: {e}"
trace = traceback.format_exc()
result["pytorch"][pytorch_arch.__name__]["model"] = model.__class__.__name__ if model is not None else None
result["pytorch"][pytorch_arch.__name__]["checkpoint"] = (
get_checkpoint_dir(output_dir, pytorch_arch) if model is not None else None
)
if error is not None:
result["pytorch"][pytorch_arch.__name__]["error"] = (error, trace)
logger.error(f"{pytorch_arch.__name__}: {error}")
for tensorflow_arch in models_to_create["tensorflow"]:
# Make PT/TF weights compatible
pt_arch_name = tensorflow_arch.__name__[2:] # Remove `TF`
pt_arch = getattr(transformers_module, pt_arch_name)
result["tensorflow"][tensorflow_arch.__name__] = {}
error = None
if pt_arch.__name__ in result["pytorch"] and result["pytorch"][pt_arch.__name__]["checkpoint"] is not None:
ckpt = get_checkpoint_dir(output_dir, pt_arch)
# Use the same weights from PyTorch.
try:
model = tensorflow_arch.from_pretrained(ckpt)
model.save_pretrained(ckpt)
except Exception as e:
# Conversion may fail. Let's not create a model with different weights to avoid confusion (for now).
model = None
error = f"Failed to convert the pytorch model to the tensorflow model for {pt_arch}: {e}"
trace = traceback.format_exc()
else:
try:
model = build_model(tensorflow_arch, tiny_config, output_dir=output_dir)
except Exception as e:
model = None
error = f"Failed to create the tensorflow model for {tensorflow_arch}: {e}"
trace = traceback.format_exc()
result["tensorflow"][tensorflow_arch.__name__]["model"] = (
model.__class__.__name__ if model is not None else None
)
result["tensorflow"][tensorflow_arch.__name__]["checkpoint"] = (
get_checkpoint_dir(output_dir, tensorflow_arch) if model is not None else None
)
if error is not None:
result["tensorflow"][tensorflow_arch.__name__]["error"] = (error, trace)
logger.error(f"{tensorflow_arch.__name__}: {error}")
if not result["error"]:
del result["error"]
if not result["warnings"]:
del result["warnings"]
return result
|
Create all models for a certain model type.
Args:
config_class (`PretrainedConfig`):
A subclass of `PretrainedConfig` that is used to determine `models_to_create`.
models_to_create (`dict`):
A dictionary containing the processor/model classes that we want to create the instances. These models are
of the same model type which is associated to `config_class`.
output_dir (`str`):
The directory to save all the checkpoints. Each model architecture will be saved in a subdirectory under
it. Models in different frameworks with the same architecture will be saved in the same subdirectory.
|
build
|
python
|
huggingface/transformers
|
utils/create_dummy_models.py
|
https://github.com/huggingface/transformers/blob/master/utils/create_dummy_models.py
|
Apache-2.0
|
def build_tiny_model_summary(results, organization=None, token=None):
"""Build a summary: a dictionary of the form
{
model architecture name:
{
"tokenizer_classes": [...],
"processor_classes": [...],
"model_classes": [...],
}
..
}
"""
tiny_model_summary = {}
for config_name in results:
processors = [key for key, value in results[config_name]["processor"].items()]
tokenizer_classes = sorted([x for x in processors if x.endswith("TokenizerFast") or x.endswith("Tokenizer")])
processor_classes = sorted([x for x in processors if x not in tokenizer_classes])
for framework in FRAMEWORKS:
if framework not in results[config_name]:
continue
for arch_name in results[config_name][framework]:
model_classes = [arch_name]
base_arch_name = arch_name[2:] if arch_name.startswith("TF") else arch_name
# tiny model is not created for `arch_name`
if results[config_name][framework][arch_name]["model"] is None:
model_classes = []
if base_arch_name not in tiny_model_summary:
tiny_model_summary[base_arch_name] = {}
tiny_model_summary[base_arch_name].update(
{
"tokenizer_classes": tokenizer_classes,
"processor_classes": processor_classes,
}
)
tiny_model_summary[base_arch_name]["model_classes"] = sorted(
tiny_model_summary[base_arch_name].get("model_classes", []) + model_classes
)
if organization is not None:
repo_name = f"tiny-random-{base_arch_name}"
                    # Composite models' checkpoints have more precise repo names on the Hub.
if base_arch_name in COMPOSITE_MODELS:
repo_name = f"tiny-random-{COMPOSITE_MODELS[base_arch_name]}"
repo_id = f"{organization}/{repo_name}"
try:
commit_hash = hf_api.repo_info(repo_id, token=token).sha
except Exception:
# The directory is not created, but processor(s) is/are included in `results`.
logger.warning(f"Failed to get information for {repo_id}.\n{traceback.format_exc()}")
del tiny_model_summary[base_arch_name]
continue
tiny_model_summary[base_arch_name]["sha"] = commit_hash
return tiny_model_summary
|
Build a summary: a dictionary of the form
{
model architecture name:
{
"tokenizer_classes": [...],
"processor_classes": [...],
"model_classes": [...],
}
..
}
|
build_tiny_model_summary
|
python
|
huggingface/transformers
|
utils/create_dummy_models.py
|
https://github.com/huggingface/transformers/blob/master/utils/create_dummy_models.py
|
Apache-2.0
|
def get_indent(line: str) -> str:
"""Returns the indent in given line (as string)."""
search = _re_indent.search(line)
return "" if search is None else search.groups()[0]
|
Returns the indent in given line (as string).
|
get_indent
|
python
|
huggingface/transformers
|
utils/custom_init_isort.py
|
https://github.com/huggingface/transformers/blob/master/utils/custom_init_isort.py
|
Apache-2.0
|
def split_code_in_indented_blocks(
code: str, indent_level: str = "", start_prompt: Optional[str] = None, end_prompt: Optional[str] = None
) -> List[str]:
"""
Split some code into its indented blocks, starting at a given level.
Args:
code (`str`): The code to split.
indent_level (`str`): The indent level (as string) to use for identifying the blocks to split.
start_prompt (`str`, *optional*): If provided, only starts splitting at the line where this text is.
end_prompt (`str`, *optional*): If provided, stops splitting at a line where this text is.
Warning:
The text before `start_prompt` or after `end_prompt` (if provided) is not ignored, just not split. The input `code`
can thus be retrieved by joining the result.
Returns:
`List[str]`: The list of blocks.
"""
# Let's split the code into lines and move to start_index.
index = 0
lines = code.split("\n")
if start_prompt is not None:
while not lines[index].startswith(start_prompt):
index += 1
blocks = ["\n".join(lines[:index])]
else:
blocks = []
# This variable contains the block treated at a given time.
current_block = [lines[index]]
index += 1
# We split into blocks until we get to the `end_prompt` (or the end of the file).
while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
# We have a non-empty line with the proper indent -> start of a new block
if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            # Store the current block in the result and reset. There are two cases: the line is part of the block (like
# a closing parenthesis) or not.
if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
# Line is part of the current block
current_block.append(lines[index])
blocks.append("\n".join(current_block))
if index < len(lines) - 1:
current_block = [lines[index + 1]]
index += 1
else:
current_block = []
else:
# Line is not part of the current block
blocks.append("\n".join(current_block))
current_block = [lines[index]]
else:
# Just add the line to the current block
current_block.append(lines[index])
index += 1
# Adds current block if it's nonempty.
if len(current_block) > 0:
blocks.append("\n".join(current_block))
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lines):
blocks.append("\n".join(lines[index:]))
return blocks
|
Split some code into its indented blocks, starting at a given level.
Args:
code (`str`): The code to split.
indent_level (`str`): The indent level (as string) to use for identifying the blocks to split.
start_prompt (`str`, *optional*): If provided, only starts splitting at the line where this text is.
end_prompt (`str`, *optional*): If provided, stops splitting at a line where this text is.
Warning:
The text before `start_prompt` or after `end_prompt` (if provided) is not ignored, just not split. The input `code`
can thus be retrieved by joining the result.
Returns:
`List[str]`: The list of blocks.
|
split_code_in_indented_blocks
|
python
|
huggingface/transformers
|
utils/custom_init_isort.py
|
https://github.com/huggingface/transformers/blob/master/utils/custom_init_isort.py
|
Apache-2.0
|
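A minimal usage sketch for `split_code_in_indented_blocks`, assuming the helpers above are importable (e.g. with the repo's utils/ directory on sys.path); the sample init snippet is made up for illustration.
from custom_init_isort import split_code_in_indented_blocks  # assumes utils/ is on sys.path

sample = (
    "# fmt: off\n"
    "_import_structure = {\n"
    '    "configuration_bert": ["BertConfig"],\n'
    '    "tokenization_bert": ["BertTokenizer"],\n'
    "}\n"
    "if TYPE_CHECKING:\n"
    "    pass"
)
blocks = split_code_in_indented_blocks(
    sample, indent_level="    ", start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
)
# Each top-level key of `_import_structure` ends up in its own block, and joining
# the blocks reconstructs the original input.
assert "\n".join(blocks) == sample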
def ignore_underscore_and_lowercase(key: Callable[[Any], str]) -> Callable[[Any], str]:
"""
Wraps a key function (as used in a sort) to lowercase and ignore underscores.
"""
def _inner(x):
return key(x).lower().replace("_", "")
return _inner
|
Wraps a key function (as used in a sort) to lowercase and ignore underscores.
|
ignore_underscore_and_lowercase
|
python
|
huggingface/transformers
|
utils/custom_init_isort.py
|
https://github.com/huggingface/transformers/blob/master/utils/custom_init_isort.py
|
Apache-2.0
|
def sort_objects(objects: List[Any], key: Optional[Callable[[Any], str]] = None) -> List[Any]:
"""
Sort a list of objects following the rules of isort (all uppercased first, camel-cased second and lower-cased
last).
Args:
objects (`List[Any]`):
The list of objects to sort.
key (`Callable[[Any], str]`, *optional*):
A function taking an object as input and returning a string, used to sort them by alphabetical order.
If not provided, will default to noop (so a `key` must be provided if the `objects` are not of type string).
Returns:
        `List[Any]`: The sorted list with the same elements as the input.
"""
# If no key is provided, we use a noop.
def noop(x):
return x
if key is None:
key = noop
# Constants are all uppercase, they go first.
constants = [obj for obj in objects if key(obj).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
# Functions begin with a lowercase, they go last.
functions = [obj for obj in objects if not key(obj)[0].isupper()]
# Then we sort each group.
key1 = ignore_underscore_and_lowercase(key)
return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
|
Sort a list of objects following the rules of isort (all uppercased first, camel-cased second and lower-cased
last).
Args:
objects (`List[Any]`):
The list of objects to sort.
key (`Callable[[Any], str]`, *optional*):
A function taking an object as input and returning a string, used to sort them by alphabetical order.
If not provided, will default to noop (so a `key` must be provided if the `objects` are not of type string).
Returns:
`List[Any]`: The sorted list with the same elements as the input.
|
sort_objects
|
python
|
huggingface/transformers
|
utils/custom_init_isort.py
|
https://github.com/huggingface/transformers/blob/master/utils/custom_init_isort.py
|
Apache-2.0
|
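A quick illustration of the isort-style ordering that `sort_objects` implements (hedged sketch; the import path assumes utils/ is on sys.path, and the names are made up).
from custom_init_isort import sort_objects  # assumes utils/ is on sys.path

names = ["load_tool", "BertModel", "AUTO_MAPPING", "AutoConfig", "logging"]
print(sort_objects(names))
# Constants first, classes second, functions last, each group sorted
# case-insensitively while ignoring underscores:
# ['AUTO_MAPPING', 'AutoConfig', 'BertModel', 'load_tool', 'logging']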
def sort_objects_in_import(import_statement: str) -> str:
"""
Sorts the imports in a single import statement.
Args:
import_statement (`str`): The import statement in which to sort the imports.
Returns:
`str`: The same as the input, but with objects properly sorted.
"""
# This inner function sort imports between [ ].
def _replace(match):
imports = match.groups()[0]
# If there is one import only, nothing to do.
if "," not in imports:
return f"[{imports}]"
keys = [part.strip().replace('"', "") for part in imports.split(",")]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1]) == 0:
keys = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"
lines = import_statement.split("\n")
if len(lines) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
idx = 2 if lines[1].strip() == "[" else 1
keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
elif len(lines) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1]) is not None:
lines[1] = _re_bracket_content.sub(_replace, lines[1])
else:
keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1]) == 0:
keys = keys[:-1]
lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
return "\n".join(lines)
else:
# Finally we have to deal with imports fitting on one line
import_statement = _re_bracket_content.sub(_replace, import_statement)
return import_statement
|
Sorts the imports in a single import statement.
Args:
import_statement (`str`): The import statement in which to sort the imports.
Returns:
`str`: The same as the input, but with objects properly sorted.
|
sort_objects_in_import
|
python
|
huggingface/transformers
|
utils/custom_init_isort.py
|
https://github.com/huggingface/transformers/blob/master/utils/custom_init_isort.py
|
Apache-2.0
|
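A hedged example of `sort_objects_in_import` on a one-line `_import_structure` entry (the module and class names are illustrative).
from custom_init_isort import sort_objects_in_import  # assumes utils/ is on sys.path

line = '    "models.bert": ["BertTokenizer", "BertConfig", "BertModel"],'
print(sort_objects_in_import(line))
# The objects inside the brackets come back alphabetized:
# '    "models.bert": ["BertConfig", "BertModel", "BertTokenizer"],'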
def sort_imports(file: str, check_only: bool = True):
"""
Sort the imports defined in the `_import_structure` of a given init.
Args:
file (`str`): The path to the init to check/fix.
check_only (`bool`, *optional*, defaults to `True`): Whether or not to just check (and not auto-fix) the init.
"""
with open(file, encoding="utf-8") as f:
code = f.read()
# If the file is not a custom init, there is nothing to do.
if "_import_structure" not in code or "define_import_structure" in code:
return
# Blocks of indent level 0
main_blocks = split_code_in_indented_blocks(
code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
)
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1, len(main_blocks) - 1):
# Check if the block contains some `_import_structure`s thingy to sort.
block = main_blocks[block_idx]
block_lines = block.split("\n")
# Get to the start of the imports.
line_idx = 0
while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
line_idx = len(block_lines)
else:
line_idx += 1
if line_idx >= len(block_lines):
continue
# Ignore beginning and last line: they don't contain anything.
internal_block_code = "\n".join(block_lines[line_idx:-1])
indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
# We have two categories of import key: list or _import_structure[key].append/extend
pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
if code != "\n".join(main_blocks):
if check_only:
return True
else:
print(f"Overwriting {file}.")
with open(file, "w", encoding="utf-8") as f:
f.write("\n".join(main_blocks))
|
Sort the imports defined in the `_import_structure` of a given init.
Args:
file (`str`): The path to the init to check/fix.
check_only (`bool`, *optional*, defaults to `True`): Whether or not to just check (and not auto-fix) the init.
|
sort_imports
|
python
|
huggingface/transformers
|
utils/custom_init_isort.py
|
https://github.com/huggingface/transformers/blob/master/utils/custom_init_isort.py
|
Apache-2.0
|
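A small how-to sketch for checking, then fixing, a single custom init with `sort_imports` (the init path is illustrative).
from custom_init_isort import sort_imports  # assumes utils/ is on sys.path

init_path = "src/transformers/models/bert/__init__.py"  # illustrative path
# With check_only=True the function returns True when the init would be rewritten.
if sort_imports(init_path, check_only=True):
    # Apply the fix in place.
    sort_imports(init_path, check_only=False)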
def sort_imports_in_all_inits(check_only=True):
"""
Sort the imports defined in the `_import_structure` of all inits in the repo.
Args:
check_only (`bool`, *optional*, defaults to `True`): Whether or not to just check (and not auto-fix) the init.
"""
failures = []
for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
if "__init__.py" in files:
result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
if result:
                failures.append(os.path.join(root, "__init__.py"))
if len(failures) > 0:
raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
|
Sort the imports defined in the `_import_structure` of all inits in the repo.
Args:
check_only (`bool`, *optional*, defaults to `True`): Whether or not to just check (and not auto-fix) the init.
|
sort_imports_in_all_inits
|
python
|
huggingface/transformers
|
utils/custom_init_isort.py
|
https://github.com/huggingface/transformers/blob/master/utils/custom_init_isort.py
|
Apache-2.0
|
def update_main_init_file(models):
"""
Replace all instances of model.model_name with model.deprecated.model_name in the __init__.py file
Args:
models (List[str]): The models to mark as deprecated
"""
filename = REPO_PATH / "src/transformers/__init__.py"
with open(filename, "r") as f:
init_file = f.read()
# 1. For each model, find all the instances of model.model_name and replace with model.deprecated.model_name
for model in models:
init_file = init_file.replace(f'models.{model}"', f'models.deprecated.{model}"')
init_file = init_file.replace(f"models.{model} import", f"models.deprecated.{model} import")
with open(filename, "w") as f:
f.write(init_file)
# 2. Resort the imports
sort_imports_in_all_inits(check_only=False)
|
Replace all instances of model.model_name with model.deprecated.model_name in the __init__.py file
Args:
models (List[str]): The models to mark as deprecated
|
update_main_init_file
|
python
|
huggingface/transformers
|
utils/deprecate_models.py
|
https://github.com/huggingface/transformers/blob/master/utils/deprecate_models.py
|
Apache-2.0
|
def remove_model_references_from_file(filename, models, condition):
"""
Remove all references to the given models from the given file
Args:
filename (str): The file to remove the references from
models (List[str]): The models to remove
condition (Callable): A function that takes the line and model and returns True if the line should be removed
"""
filename = REPO_PATH / filename
with open(filename, "r") as f:
init_file = f.read()
new_file_lines = []
for i, line in enumerate(init_file.split("\n")):
if any(condition(line, model) for model in models):
continue
new_file_lines.append(line)
with open(filename, "w") as f:
f.write("\n".join(new_file_lines))
|
Remove all references to the given models from the given file
Args:
filename (str): The file to remove the references from
models (List[str]): The models to remove
condition (Callable): A function that takes the line and model and returns True if the line should be removed
|
remove_model_references_from_file
|
python
|
huggingface/transformers
|
utils/deprecate_models.py
|
https://github.com/huggingface/transformers/blob/master/utils/deprecate_models.py
|
Apache-2.0
|
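A hypothetical call showing the shape of the `condition` callable; the file path, model names, and predicate are illustrative, not the script's actual invocation.
# Drop every line of the doc toctree that references one of the deprecated models.
remove_model_references_from_file(
    "docs/source/en/_toctree.yml",
    models=["van", "deta"],
    condition=lambda line, model: f"model_doc/{model}" in line,
)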
def remove_model_config_classes_from_config_check(model_config_classes):
"""
Remove the deprecated model config classes from the check_config_attributes.py file
Args:
model_config_classes (List[str]): The model config classes to remove e.g. ["BertConfig", "DistilBertConfig"]
"""
filename = REPO_PATH / "utils/check_config_attributes.py"
with open(filename, "r") as f:
check_config_attributes = f.read()
# Keep track as we have to delete comment above too
in_special_cases_to_allow = False
in_indent = False
new_file_lines = []
for line in check_config_attributes.split("\n"):
indent = get_line_indent(line)
if (line.strip() == "SPECIAL_CASES_TO_ALLOW = {") or (line.strip() == "SPECIAL_CASES_TO_ALLOW.update("):
in_special_cases_to_allow = True
elif in_special_cases_to_allow and indent == 0 and line.strip() in ("}", ")"):
in_special_cases_to_allow = False
if in_indent:
if line.strip().endswith(("]", "],")):
in_indent = False
continue
if in_special_cases_to_allow and any(
model_config_class in line for model_config_class in model_config_classes
):
# Remove comments above the model config class to remove
while new_file_lines[-1].strip().startswith("#"):
new_file_lines.pop()
if line.strip().endswith("["):
in_indent = True
continue
elif any(model_config_class in line for model_config_class in model_config_classes):
continue
new_file_lines.append(line)
with open(filename, "w") as f:
f.write("\n".join(new_file_lines))
|
Remove the deprecated model config classes from the check_config_attributes.py file
Args:
model_config_classes (List[str]): The model config classes to remove e.g. ["BertConfig", "DistilBertConfig"]
|
remove_model_config_classes_from_config_check
|
python
|
huggingface/transformers
|
utils/deprecate_models.py
|
https://github.com/huggingface/transformers/blob/master/utils/deprecate_models.py
|
Apache-2.0
|
def add_models_to_deprecated_models_in_config_auto(models):
"""
    Add the models to the DEPRECATED_MODELS list in configuration_auto.py and sort the list
    alphabetically.
"""
filepath = REPO_PATH / "src/transformers/models/auto/configuration_auto.py"
with open(filepath, "r") as f:
config_auto = f.read()
new_file_lines = []
deprecated_models_list = []
in_deprecated_models = False
for line in config_auto.split("\n"):
if line.strip() == "DEPRECATED_MODELS = [":
in_deprecated_models = True
new_file_lines.append(line)
elif in_deprecated_models and line.strip() == "]":
in_deprecated_models = False
# Add the new models to deprecated models list
deprecated_models_list.extend([f' "{model}", ' for model in models])
# Sort so they're in alphabetical order in the file
deprecated_models_list = sorted(deprecated_models_list)
new_file_lines.extend(deprecated_models_list)
# Make sure we still have the closing bracket
new_file_lines.append(line)
elif in_deprecated_models:
deprecated_models_list.append(line)
else:
new_file_lines.append(line)
with open(filepath, "w") as f:
f.write("\n".join(new_file_lines))
|
Add the models to the DEPRECATED_MODELS list in configuration_auto.py and sort the list
alphabetically.
|
add_models_to_deprecated_models_in_config_auto
|
python
|
huggingface/transformers
|
utils/deprecate_models.py
|
https://github.com/huggingface/transformers/blob/master/utils/deprecate_models.py
|
Apache-2.0
|
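A rough sketch of how the deprecation helpers above might be chained for a couple of models; the model names and config classes are made up, and the real script performs additional steps.
models = ["van", "deta"]                      # illustrative model names
config_classes = ["VanConfig", "DetaConfig"]  # illustrative config classes

update_main_init_file(models)
remove_model_config_classes_from_config_check(config_classes)
add_models_to_deprecated_models_in_config_auto(models)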
def extract_warnings_from_single_artifact(artifact_path, targets):
"""Extract warnings from a downloaded artifact (in .zip format)"""
selected_warnings = set()
buffer = []
def parse_line(fp):
for line in fp:
if isinstance(line, bytes):
line = line.decode("UTF-8")
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(" "):
# process a single warning and move it to `selected_warnings`.
if len(buffer) > 0:
warning = "\n".join(buffer)
# Only keep the warnings specified in `targets`
if any(f": {x}: " in warning for x in targets):
selected_warnings.add(warning)
buffer.clear()
continue
else:
line = line.strip()
buffer.append(line)
if from_gh:
for filename in os.listdir(artifact_path):
file_path = os.path.join(artifact_path, filename)
if not os.path.isdir(file_path):
# read the file
if filename != "warnings.txt":
continue
with open(file_path) as fp:
parse_line(fp)
else:
try:
with zipfile.ZipFile(artifact_path) as z:
for filename in z.namelist():
if not os.path.isdir(filename):
# read the file
if filename != "warnings.txt":
continue
with z.open(filename) as fp:
parse_line(fp)
except Exception:
logger.warning(
f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
)
return selected_warnings
|
Extract warnings from a downloaded artifact (in .zip format)
|
extract_warnings_from_single_artifact
|
python
|
huggingface/transformers
|
utils/extract_warnings.py
|
https://github.com/huggingface/transformers/blob/master/utils/extract_warnings.py
|
Apache-2.0
|
def extract_warnings(artifact_dir, targets):
"""Extract warnings from all artifact files"""
selected_warnings = set()
paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
return selected_warnings
|
Extract warnings from all artifact files
|
extract_warnings
|
python
|
huggingface/transformers
|
utils/extract_warnings.py
|
https://github.com/huggingface/transformers/blob/master/utils/extract_warnings.py
|
Apache-2.0
|
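A hedged usage sketch: after downloading CI artifacts into a local directory, collect the warning categories of interest (the directory name and targets are illustrative, and the module-level `from_gh` flag must be set by the calling script).
targets = ["DeprecationWarning", "UserWarning", "FutureWarning"]  # illustrative targets
selected = extract_warnings("artifacts", targets)
for warning in sorted(selected):
    print(warning)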
def get_jobs(workflow_run_id, token=None):
"""Extract jobs in a GitHub Actions workflow run"""
headers = None
if token is not None:
headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
result = requests.get(url, headers=headers).json()
jobs = []
try:
jobs.extend(result["jobs"])
pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
for i in range(pages_to_iterate_over):
result = requests.get(url + f"&page={i + 2}", headers=headers).json()
jobs.extend(result["jobs"])
return jobs
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
return []
|
Extract jobs in a GitHub Actions workflow run
|
get_jobs
|
python
|
huggingface/transformers
|
utils/get_ci_error_statistics.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_ci_error_statistics.py
|
Apache-2.0
|
def get_job_links(workflow_run_id, token=None):
"""Extract job names and their job links in a GitHub Actions workflow run"""
headers = None
if token is not None:
headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
result = requests.get(url, headers=headers).json()
job_links = {}
try:
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
for i in range(pages_to_iterate_over):
result = requests.get(url + f"&page={i + 2}", headers=headers).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
return {}
|
Extract job names and their job links in a GitHub Actions workflow run
|
get_job_links
|
python
|
huggingface/transformers
|
utils/get_ci_error_statistics.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_ci_error_statistics.py
|
Apache-2.0
|
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
result = requests.get(url, headers=headers).json()
artifacts = {}
try:
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
for i in range(pages_to_iterate_over):
result = requests.get(url + f"&page={i + 2}", headers=headers).json()
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
return {}
|
Get all artifact links from a workflow run
|
get_artifacts_links
|
python
|
huggingface/transformers
|
utils/get_ci_error_statistics.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_ci_error_statistics.py
|
Apache-2.0
|
def download_artifact(artifact_name, artifact_url, output_dir, token):
"""Download a GitHub Action artifact from a URL.
The URL is of the form `https://api.github.com/repos/huggingface/transformers/actions/artifacts/{ARTIFACT_ID}/zip`,
but it can't be used to download directly. We need to get a redirect URL first.
See https://docs.github.com/en/rest/actions/artifacts#download-an-artifact
"""
headers = None
if token is not None:
headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
result = requests.get(artifact_url, headers=headers, allow_redirects=False)
download_url = result.headers["Location"]
response = requests.get(download_url, allow_redirects=True)
file_path = os.path.join(output_dir, f"{artifact_name}.zip")
with open(file_path, "wb") as fp:
fp.write(response.content)
|
Download a GitHub Action artifact from a URL.
The URL is of the form `https://api.github.com/repos/huggingface/transformers/actions/artifacts/{ARTIFACT_ID}/zip`,
but it can't be used to download directly. We need to get a redirect URL first.
See https://docs.github.com/en/rest/actions/artifacts#download-an-artifact
|
download_artifact
|
python
|
huggingface/transformers
|
utils/get_ci_error_statistics.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_ci_error_statistics.py
|
Apache-2.0
|
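A hypothetical combination of the two helpers above: list a run's artifacts, then download one of them (the run id, artifact name, and token variable are illustrative).
import os

token = os.environ.get("ACCESS_REPO_INFO_TOKEN")          # illustrative token source
links = get_artifacts_links("11111111111", token=token)   # illustrative run id
if "warnings_in_ci" in links:                              # illustrative artifact name
    os.makedirs("artifacts", exist_ok=True)
    download_artifact("warnings_in_ci", links["warnings_in_ci"], output_dir="artifacts", token=token)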
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
"""Extract errors from a downloaded artifact (in .zip format)"""
errors = []
failed_tests = []
job_name = None
with zipfile.ZipFile(artifact_zip_path) as z:
for filename in z.namelist():
if not os.path.isdir(filename):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(filename) as f:
for line in f:
line = line.decode("UTF-8").strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
error_line = line[: line.index(": ")]
error = line[line.index(": ") + len(": ") :]
errors.append([error_line, error])
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED "):
# `test` is the test method that failed
test = line[len("FAILED ") :]
failed_tests.append(test)
elif filename == "job_name.txt":
job_name = line
if len(errors) != len(failed_tests):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
" problem."
)
job_link = None
if job_name and job_links:
job_link = job_links.get(job_name, None)
# A list with elements of the form (line of error, error, failed test)
result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
return result
|
Extract errors from a downloaded artifact (in .zip format)
|
get_errors_from_single_artifact
|
python
|
huggingface/transformers
|
utils/get_ci_error_statistics.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_ci_error_statistics.py
|
Apache-2.0
|
def get_all_errors(artifact_dir, job_links=None):
"""Extract errors from all artifact files"""
errors = []
paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
for p in paths:
errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
return errors
|
Extract errors from all artifact files
|
get_all_errors
|
python
|
huggingface/transformers
|
utils/get_ci_error_statistics.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_ci_error_statistics.py
|
Apache-2.0
|
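A hedged end-to-end sketch that ties `get_job_links` and `get_all_errors` together and tallies the most frequent errors (the run id, directory, and token handling are illustrative).
import os
from collections import Counter

token = os.environ.get("ACCESS_REPO_INFO_TOKEN")        # illustrative token source
job_links = get_job_links("11111111111", token=token)   # illustrative run id
errors = get_all_errors("artifacts", job_links=job_links)
# Each entry is [error line, error, failed test, job link].
counter = Counter(error for _, error, _, _ in errors)
print(counter.most_common(10))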
def get_model(test):
"""Get the model name from a test method"""
test = test.split("::")[0]
if test.startswith("tests/models/"):
test = test.split("/")[2]
else:
test = None
return test
|
Get the model name from a test method
|
get_model
|
python
|
huggingface/transformers
|
utils/get_ci_error_statistics.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_ci_error_statistics.py
|
Apache-2.0
|
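Two illustrative inputs for `get_model` (the test ids are made up):
print(get_model("tests/models/bert/test_modeling_bert.py::BertModelTest::test_forward"))  # -> "bert"
print(get_model("tests/test_modeling_common.py::CommonTest::test_forward"))               # -> None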
def extract_time_from_single_job(job):
"""Extract time info from a single job in a GitHub Actions workflow run"""
job_info = {}
start = job["started_at"]
end = job["completed_at"]
start_datetime = date_parser.parse(start)
end_datetime = date_parser.parse(end)
duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
job_info["started_at"] = start
job_info["completed_at"] = end
job_info["duration"] = duration_in_min
return job_info
|
Extract time info from a single job in a GitHub Actions workflow run
|
extract_time_from_single_job
|
python
|
huggingface/transformers
|
utils/get_github_job_time.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_github_job_time.py
|
Apache-2.0
|
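A minimal illustration with a hand-written job payload containing only the fields the helper reads (the timestamps are made up):
job = {"started_at": "2024-05-01T10:00:00Z", "completed_at": "2024-05-01T10:41:00Z"}
print(extract_time_from_single_job(job))
# {'started_at': '2024-05-01T10:00:00Z', 'completed_at': '2024-05-01T10:41:00Z', 'duration': 41}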
def get_job_time(workflow_run_id, token=None):
"""Extract time info for all jobs in a GitHub Actions workflow run"""
headers = None
if token is not None:
headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
result = requests.get(url, headers=headers).json()
job_time = {}
try:
job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
for i in range(pages_to_iterate_over):
result = requests.get(url + f"&page={i + 2}", headers=headers).json()
job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
return job_time
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
return {}
|
Extract time info for all jobs in a GitHub Actions workflow run
|
get_job_time
|
python
|
huggingface/transformers
|
utils/get_github_job_time.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_github_job_time.py
|
Apache-2.0
|
def get_daily_ci_runs(token, num_runs=7, workflow_id=None):
"""Get the workflow runs of the scheduled (daily) CI.
This only selects the runs triggered by the `schedule` event on the `main` branch.
"""
headers = None
if token is not None:
headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
# The id of a workflow (not of a workflow run).
# From a given workflow run (where we have workflow run id), we can get the workflow id by going to
# https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}
# and check the `workflow_id` key.
if not workflow_id:
workflow_run_id = os.environ["GITHUB_RUN_ID"]
workflow_run = requests.get(
f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}", headers=headers
).json()
workflow_id = workflow_run["workflow_id"]
url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f"?branch=main&exclude_pull_requests=true&per_page={num_runs}"
result = requests.get(f"{url}&event=schedule", headers=headers).json()
workflow_runs = result["workflow_runs"]
if len(workflow_runs) == 0:
result = requests.get(f"{url}&event=workflow_run", headers=headers).json()
workflow_runs = result["workflow_runs"]
return workflow_runs
|
Get the workflow runs of the scheduled (daily) CI.
This only selects the runs triggered by the `schedule` event on the `main` branch.
|
get_daily_ci_runs
|
python
|
huggingface/transformers
|
utils/get_previous_daily_ci.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_previous_daily_ci.py
|
Apache-2.0
|
def get_last_daily_ci_run(token, workflow_run_id=None, workflow_id=None, commit_sha=None):
"""Get the last completed workflow run id of the scheduled (daily) CI."""
headers = None
if token is not None:
headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
workflow_run = None
if workflow_run_id is not None and workflow_run_id != "":
workflow_run = requests.get(
f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}", headers=headers
).json()
return workflow_run
workflow_runs = get_daily_ci_runs(token, workflow_id=workflow_id)
for run in workflow_runs:
if commit_sha in [None, ""] and run["status"] == "completed":
workflow_run = run
break
# if `commit_sha` is specified, return the latest completed run with `workflow_run["head_sha"]` matching the specified sha.
elif commit_sha not in [None, ""] and run["head_sha"] == commit_sha and run["status"] == "completed":
workflow_run = run
break
return workflow_run
|
Get the last completed workflow run of the scheduled (daily) CI.
|
get_last_daily_ci_run
|
python
|
huggingface/transformers
|
utils/get_previous_daily_ci.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_previous_daily_ci.py
|
Apache-2.0
|
def get_last_daily_ci_workflow_run_id(token, workflow_run_id=None, workflow_id=None, commit_sha=None):
"""Get the last completed workflow run id of the scheduled (daily) CI."""
if workflow_run_id is not None and workflow_run_id != "":
return workflow_run_id
workflow_run = get_last_daily_ci_run(token, workflow_id=workflow_id, commit_sha=commit_sha)
workflow_run_id = None
if workflow_run is not None:
workflow_run_id = workflow_run["id"]
return workflow_run_id
|
Get the last completed workflow run id of the scheduled (daily) CI.
|
get_last_daily_ci_workflow_run_id
|
python
|
huggingface/transformers
|
utils/get_previous_daily_ci.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_previous_daily_ci.py
|
Apache-2.0
|
def get_last_daily_ci_artifacts(
artifact_names, output_dir, token, workflow_run_id=None, workflow_id=None, commit_sha=None
):
"""Get the artifacts of last completed workflow run id of the scheduled (daily) CI."""
workflow_run_id = get_last_daily_ci_workflow_run_id(
token, workflow_run_id=workflow_run_id, workflow_id=workflow_id, commit_sha=commit_sha
)
if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(workflow_run_id=workflow_run_id, token=token)
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
artifact_url = artifacts_links[artifact_name]
download_artifact(
artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
)
|
Get the artifacts of the last completed workflow run of the scheduled (daily) CI.
|
get_last_daily_ci_artifacts
|
python
|
huggingface/transformers
|
utils/get_previous_daily_ci.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_previous_daily_ci.py
|
Apache-2.0
|
def get_last_daily_ci_reports(
artifact_names, output_dir, token, workflow_run_id=None, workflow_id=None, commit_sha=None
):
"""Get the artifacts' content of the last completed workflow run id of the scheduled (daily) CI."""
get_last_daily_ci_artifacts(
artifact_names,
output_dir,
token,
workflow_run_id=workflow_run_id,
workflow_id=workflow_id,
commit_sha=commit_sha,
)
results = {}
for artifact_name in artifact_names:
artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
if os.path.isfile(artifact_zip_path):
results[artifact_name] = {}
with zipfile.ZipFile(artifact_zip_path) as z:
for filename in z.namelist():
if not os.path.isdir(filename):
# read the file
with z.open(filename) as f:
results[artifact_name][filename] = f.read().decode("UTF-8")
return results
|
Get the artifacts' content of the last completed workflow run of the scheduled (daily) CI.
|
get_last_daily_ci_reports
|
python
|
huggingface/transformers
|
utils/get_previous_daily_ci.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_previous_daily_ci.py
|
Apache-2.0
|
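A hypothetical call that fetches and reads report artifacts from the latest completed daily CI run (the artifact name, output directory, and token variable are illustrative).
import os

os.makedirs("previous_ci", exist_ok=True)
reports = get_last_daily_ci_reports(
    artifact_names=["ci_results_run_models_gpu"],  # illustrative artifact name
    output_dir="previous_ci",
    token=os.environ.get("ACCESS_REPO_INFO_TOKEN"),
)
for artifact_name, files in reports.items():
    print(artifact_name, sorted(files))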
def get_module_path(test_file):
"""Return the module path of a model test file."""
components = test_file.split(os.path.sep)
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
f"{test_file} instead."
)
test_fn = components[-1]
if not test_fn.endswith("py"):
raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
if not test_fn.startswith("test_modeling_"):
raise ValueError(
f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
)
components = components[:-1] + [test_fn.replace(".py", "")]
test_module_path = ".".join(components)
return test_module_path
|
Return the module path of a model test file.
|
get_module_path
|
python
|
huggingface/transformers
|
utils/get_test_info.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_test_info.py
|
Apache-2.0
|
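A quick example of the path-to-module translation performed by `get_module_path`:
import os

print(get_module_path(os.path.join("tests", "models", "bert", "test_modeling_bert.py")))
# -> "tests.models.bert.test_modeling_bert"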
def get_test_module(test_file):
"""Get the module of a model test file."""
test_module_path = get_module_path(test_file)
try:
test_module = importlib.import_module(test_module_path)
except AttributeError as exc:
# e.g. if you have a `tests` folder in `site-packages`, created by another package, when trying to import
# `tests.models...`
raise ValueError(
f"Could not import module {test_module_path}. Confirm that you don't have a package with the same root "
"name installed or in your environment's `site-packages`."
) from exc
return test_module
|
Get the module of a model test file.
|
get_test_module
|
python
|
huggingface/transformers
|
utils/get_test_info.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_test_info.py
|
Apache-2.0
|
def get_tester_classes(test_file):
"""Get all classes in a model test file whose names ends with `ModelTester`."""
tester_classes = []
test_module = get_test_module(test_file)
for attr in dir(test_module):
if attr.endswith("ModelTester"):
tester_classes.append(getattr(test_module, attr))
# sort with class names
return sorted(tester_classes, key=lambda x: x.__name__)
|
Get all classes in a model test file whose names end with `ModelTester`.
|
get_tester_classes
|
python
|
huggingface/transformers
|
utils/get_test_info.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_test_info.py
|
Apache-2.0
|
def get_test_classes(test_file):
"""Get all [test] classes in a model test file with attribute `all_model_classes` that are non-empty.
These are usually the (model) test classes containing the (non-slow) tests to run and are subclasses of one of the
classes `ModelTesterMixin`, `TFModelTesterMixin` or `FlaxModelTesterMixin`, as well as a subclass of
`unittest.TestCase`. Exceptions include `RagTestMixin` (and its subclasses).
"""
test_classes = []
test_module = get_test_module(test_file)
for attr in dir(test_module):
attr_value = getattr(test_module, attr)
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
model_classes = getattr(attr_value, "all_model_classes", [])
if len(model_classes) > 0:
test_classes.append(attr_value)
# sort with class names
return sorted(test_classes, key=lambda x: x.__name__)
|
Get all [test] classes in a model test file with a non-empty `all_model_classes` attribute.
These are usually the (model) test classes containing the (non-slow) tests to run and are subclasses of one of the
classes `ModelTesterMixin`, `TFModelTesterMixin` or `FlaxModelTesterMixin`, as well as a subclass of
`unittest.TestCase`. Exceptions include `RagTestMixin` (and its subclasses).
|
get_test_classes
|
python
|
huggingface/transformers
|
utils/get_test_info.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_test_info.py
|
Apache-2.0
|
def get_model_classes(test_file):
"""Get all model classes that appear in `all_model_classes` attributes in a model test file."""
test_classes = get_test_classes(test_file)
model_classes = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes)
# sort with class names
return sorted(model_classes, key=lambda x: x.__name__)
|
Get all model classes that appear in `all_model_classes` attributes in a model test file.
|
get_model_classes
|
python
|
huggingface/transformers
|
utils/get_test_info.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_test_info.py
|
Apache-2.0
|
def get_model_tester_from_test_class(test_class):
"""Get the model tester class of a model test class."""
test = test_class()
if hasattr(test, "setUp"):
test.setUp()
model_tester = None
if hasattr(test, "model_tester"):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
model_tester = test.model_tester.__class__
return model_tester
|
Get the model tester class of a model test class.
|
get_model_tester_from_test_class
|
python
|
huggingface/transformers
|
utils/get_test_info.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_test_info.py
|
Apache-2.0
|
def get_test_classes_for_model(test_file, model_class):
"""Get all [test] classes in `test_file` that have `model_class` in their `all_model_classes`."""
test_classes = get_test_classes(test_file)
target_test_classes = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(test_class)
# sort with class names
return sorted(target_test_classes, key=lambda x: x.__name__)
|
Get all [test] classes in `test_file` that have `model_class` in their `all_model_classes`.
|
get_test_classes_for_model
|
python
|
huggingface/transformers
|
utils/get_test_info.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_test_info.py
|
Apache-2.0
|
def get_tester_classes_for_model(test_file, model_class):
"""Get all model tester classes in `test_file` that are associated to `model_class`."""
test_classes = get_test_classes_for_model(test_file, model_class)
tester_classes = []
for test_class in test_classes:
tester_class = get_model_tester_from_test_class(test_class)
if tester_class is not None:
tester_classes.append(tester_class)
# sort with class names
return sorted(tester_classes, key=lambda x: x.__name__)
|
Get all model tester classes in `test_file` that are associated to `model_class`.
|
get_tester_classes_for_model
|
python
|
huggingface/transformers
|
utils/get_test_info.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_test_info.py
|
Apache-2.0
|
def get_test_to_tester_mapping(test_file):
"""Get a mapping from [test] classes to model tester classes in `test_file`.
This uses `get_test_classes` which may return classes that are NOT subclasses of `unittest.TestCase`.
"""
test_classes = get_test_classes(test_file)
test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
return test_tester_mapping
|
Get a mapping from [test] classes to model tester classes in `test_file`.
This uses `get_test_classes` which may return classes that are NOT subclasses of `unittest.TestCase`.
|
get_test_to_tester_mapping
|
python
|
huggingface/transformers
|
utils/get_test_info.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_test_info.py
|
Apache-2.0
|
def get_model_to_test_mapping(test_file):
"""Get a mapping from model classes to test classes in `test_file`."""
model_classes = get_model_classes(test_file)
model_test_mapping = {
model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
}
return model_test_mapping
|
Get a mapping from model classes to test classes in `test_file`.
|
get_model_to_test_mapping
|
python
|
huggingface/transformers
|
utils/get_test_info.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_test_info.py
|
Apache-2.0
|
def get_model_to_tester_mapping(test_file):
"""Get a mapping from model classes to model tester classes in `test_file`."""
model_classes = get_model_classes(test_file)
model_to_tester_mapping = {
model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
}
return model_to_tester_mapping
|
Get a mapping from model classes to model tester classes in `test_file`.
|
get_model_to_tester_mapping
|
python
|
huggingface/transformers
|
utils/get_test_info.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_test_info.py
|
Apache-2.0
|
def to_json(o):
"""Make the information succinct and easy to read.
Avoid the full class representation like `<class 'transformers.models.bert.modeling_bert.BertForMaskedLM'>` when
    displaying the results. Instead, we use the class name (`BertForMaskedLM`) for readability.
"""
if isinstance(o, str):
return o
elif isinstance(o, type):
return o.__name__
elif isinstance(o, (list, tuple)):
return [to_json(x) for x in o]
elif isinstance(o, dict):
return {to_json(k): to_json(v) for k, v in o.items()}
else:
return o
|
Make the information succinct and easy to read.
Avoid the full class representation like `<class 'transformers.models.bert.modeling_bert.BertForMaskedLM'>` when
displaying the results. Instead, we use the class name (`BertForMaskedLM`) for readability.
|
to_json
|
python
|
huggingface/transformers
|
utils/get_test_info.py
|
https://github.com/huggingface/transformers/blob/master/utils/get_test_info.py
|
Apache-2.0
|
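A hedged sketch combining the introspection helpers above: map model classes to their tester classes for one test file and pretty-print the result with `to_json` (the test file path is illustrative and requires a transformers checkout with its test dependencies importable).
import json

mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
print(json.dumps(to_json(mapping), indent=4))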