Dataset schema (column, type, value statistics):
  code       string  (length 66 to 870k)
  docstring  string  (length 19 to 26.7k)
  func_name  string  (length 1 to 138)
  language   string  (1 distinct value)
  repo       string  (length 7 to 68)
  path       string  (length 5 to 324)
  url        string  (length 46 to 389)
  license    string  (7 distinct values)
def get_cased_name(lowercase_name: str) -> str:
    """From a model name in lowercase in the format `my_model`, return the cased name in the format `MyModel`."""
    alt_lowercase_name = lowercase_name.replace("_", "-")
    if lowercase_name in CONFIG_MAPPING_NAMES:
        return CONFIG_MAPPING_NAMES[lowercase_name].replace("Config", "")
    elif alt_lowercase_name in CONFIG_MAPPING_NAMES:
        return CONFIG_MAPPING_NAMES[alt_lowercase_name].replace("Config", "")
    else:
        return "".join(x.title() for x in lowercase_name.split("_"))
From a model name in lowercase in the format `my_model`, return the cased name in the format `MyModel`.
get_cased_name
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def get_lowercase_name(cased_name: str) -> str:
    """From a model name in CamelCase in the format `MyModel`, return the lowercase name in the format `my_model`."""
    inverse_mapping = {value: key for key, value in CONFIG_MAPPING_NAMES.items()}
    if cased_name + "Config" in inverse_mapping:
        return inverse_mapping[cased_name + "Config"]
    else:
        return "_".join([s.lower() for s in re.findall(r"[A-Z][^A-Z]*", cased_name)])
From a model name in CamelCase in the format `MyModel`, return the lowercase name in the format `my_model`.
get_lowercase_name
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
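To make the round-trip behavior of the two name helpers above concrete, here is a minimal self-contained sketch; the `CONFIG_MAPPING_NAMES` subset is illustrative (the real mapping comes from transformers), and only the branches shown above are reproduced:

```python
import re

CONFIG_MAPPING_NAMES = {"gpt2": "GPT2Config"}  # hypothetical subset for the demo

def get_cased_name(lowercase_name: str) -> str:
    # Prefer the explicit config mapping; fall back to naive title-casing
    if lowercase_name in CONFIG_MAPPING_NAMES:
        return CONFIG_MAPPING_NAMES[lowercase_name].replace("Config", "")
    return "".join(x.title() for x in lowercase_name.split("_"))

def get_lowercase_name(cased_name: str) -> str:
    # Invert the mapping; fall back to splitting on capital letters
    inverse = {v: k for k, v in CONFIG_MAPPING_NAMES.items()}
    if cased_name + "Config" in inverse:
        return inverse[cased_name + "Config"]
    return "_".join(s.lower() for s in re.findall(r"[A-Z][^A-Z]*", cased_name))

print(get_cased_name("my_model"))     # MyModel
print(get_cased_name("gpt2"))         # GPT2 (via the config mapping, not title-casing)
print(get_lowercase_name("MyModel"))  # my_model
```

The config-mapping lookup exists precisely because naive casing is not invertible for names like `GPT2` (title-casing `gpt2` would give `Gpt2`).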
def leave_ImportFrom(self, original_node, updated_node):
    """The imports from other file types (configuration, processing, etc.) should use the original model name."""
    if self.original_new_model_name != self.new_name and m.matches(updated_node.module, m.Name()):
        patterns = "|".join(ALL_FILE_TYPES)
        regex = rf"({patterns})_{self.new_name}"
        new_source = re.sub(
            regex, lambda m: f"{m.group(1)}_{self.original_new_model_name}", updated_node.module.value
        )
        updated_node = updated_node.with_changes(module=updated_node.module.with_changes(value=new_source))
    return updated_node
The imports from other file types (configuration, processing, etc.) should use the original model name.
leave_ImportFrom
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def get_full_attribute_name(node: Union[cst.Attribute, cst.Name]) -> Optional[str]:
    """Get the full name of an Attribute or Name node (e.g. `"nn.Module"` for an Attribute representing it). If the
    successive values of an Attribute are not Name nodes, return `None`."""
    if m.matches(node, m.Name()):
        return node.value
    elif m.matches(node, m.Attribute()):
        if not m.matches(node.attr, m.Name()):
            return None
        name = node.attr.value
        new_node = node.value
        while m.matches(new_node, m.Attribute()):
            if not m.matches(new_node.attr, m.Name()):
                return None
            name = new_node.attr.value + "." + name
            new_node = new_node.value
        if not m.matches(new_node, m.Name()):
            return None
        return new_node.value + "." + name
    return None
Get the full name of an Attribute or Name node (e.g. `"nn.Module"` for an Attribute representing it). If the successive values of an Attribute are not Name nodes, return `None`.
get_full_attribute_name
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
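For a quick sense of what `get_full_attribute_name` computes, the standalone snippet below (assuming only `libcst` is installed; the dotted path is made up) builds an `Attribute` chain with `cst.parse_expression` and walks `.attr`/`.value` the same way:

```python
import libcst as cst

expr = cst.parse_expression("torch.nn.Module")  # Attribute(Attribute(Name, Name), Name)
parts = []
node = expr
while isinstance(node, cst.Attribute):
    parts.append(node.attr.value)  # collect attribute names from the outside in
    node = node.value
parts.append(node.value)  # the leading Name ("torch"); the real helper returns None if this is not a Name
print(".".join(reversed(parts)))  # torch.nn.Module
```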
def is_full_docstring(original_docstring: str, new_docstring: str, original_level: int) -> bool:
    """Check if `new_docstring` is a full docstring, or if it is only part of a docstring that should then be merged
    with the existing old one.
    """
    # libcst returns the docstrings with literal `r"""` quotes in front
    new_docstring = new_docstring.split('"""', 1)[1]
    # The docstring contains an Args definition, so it is self-contained
    if re.search(r"\n\s*Args:\n", new_docstring):
        return True
    elif re.search(r"\n\s*Args:\n", original_docstring):
        return False
    # Check if the docstring contains an args docstring (meaning it is self-contained):
    param_pattern = re.compile(
        # |--- Group 1 ---|| Group 2 ||- Group 3 -||---------- Group 4 ----------|
        rf"^\s{{0,{original_level}}}(\w+)\s*\(\s*([^, \)]*)(\s*.*?)\s*\)\s*:\s*((?:(?!\n^\s{{0,{original_level}}}\w+\s*\().)*)",
        re.DOTALL | re.MULTILINE,
    )
    match_object = param_pattern.search(new_docstring)
    if match_object is not None:
        return True
    # If it contains Returns, but starts with text indented with an additional 4 spaces before, it is self-contained
    # (this is the scenario when using `@add_start_docstrings_to_model_forward`, but adding more args to the docstring)
    match_object = re.search(r"\n([^\S\n]*)Returns:\n", new_docstring)
    if match_object is not None:
        full_indent = match_object.group(1)
        striped_doc = new_docstring.strip("\n")
        if striped_doc.startswith(full_indent + " " * 4) or striped_doc.startswith(full_indent + "\t"):
            return True
    return False
Check if `new_docstring` is a full docstring, or if it is only part of a docstring that should then be merged with the existing old one.
is_full_docstring
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
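As a minimal illustration of the first check in `is_full_docstring` (the later checks use the larger parameter regex), a docstring that carries its own `Args:` section is treated as self-contained; the sample docstrings are invented:

```python
import re

full_doc = "Forward pass.\n\nArgs:\n    x (Tensor): the input tensor\n"
partial_doc = "Just an extra summary line to merge into the parent docstring."
print(bool(re.search(r"\n\s*Args:\n", full_doc)))     # True -> treated as self-contained
print(bool(re.search(r"\n\s*Args:\n", partial_doc)))  # False -> candidate for merging
```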
def update_body(self, existing_body, new_statements):
    """
    Helper method to update the body by removing duplicates before adding new statements.
    `existing_body` is the body of the original method (the parent class); `new_statements` are the additional
    statements.
    """
    deduplicated_new_body = []
    existing_nodes = set()
    for node in new_statements:
        if m.matches(node, m.SimpleStatementLine(body=[m.Assign()])):
            target = self.python_module.code_for_node(node.body[0].targets[0].target)
            self.all_assign_target[target] = node
        if m.matches(node, m.SimpleStatementLine(body=[m.Del()])):
            target = self.python_module.code_for_node(node.body[0].target)
            self.deleted_targets[target] = node

    for stmt in existing_body:
        if m.matches(stmt, m.SimpleStatementLine(body=[m.Assign()])):
            target = self.python_module.code_for_node(stmt.body[0].targets[0].target)
            if target in self.deleted_targets:
                continue
            if target in self.all_assign_target:
                stmt = self.all_assign_target[target]
        # Skip the docstring (will be added later on, at the beginning)
        elif m.matches(stmt, DOCSTRING_NODE):
            continue
        comment_less_code = re.sub(r"#.*", "", self.python_module.code_for_node(stmt)).strip()
        comment_less_code = re.sub(r"\ *\n", "\n", comment_less_code).strip()
        deduplicated_new_body.append(stmt)
        existing_nodes.add(comment_less_code)

    for node in new_statements:
        code = self.python_module.code_for_node(node)
        comment_less_code = re.sub(r"#.*", "", code).strip()
        comment_less_code = re.sub(r"\ *\n", "\n", comment_less_code).strip()
        if node not in deduplicated_new_body and comment_less_code not in existing_nodes:
            if not m.matches(node, m.SimpleStatementLine(body=[m.Del()])):
                deduplicated_new_body.append(node)
                existing_nodes.add(comment_less_code)

    deduplicated_new_body = self._fix_post_init_location(deduplicated_new_body)
    return deduplicated_new_body
Helper method to update the body by removing duplicates before adding new statements. `existing_body` is the body of the original method (the parent class); `new_statements` are the additional statements.
update_body
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def _fix_post_init_location(self, new_body: list[cst.CSTNode]):
    """Fix the location of the `post_init()` in the new body, if we added statements after the call to
    `super()` (it needs to be the very last statement called)"""
    # Fix the post_init() that has to be last
    for i, node in enumerate(new_body):
        code = self.python_module.code_for_node(node)
        comment_less_code = re.sub(r"#.*", "", code).strip()
        comment_less_code = re.sub(r"\ *\n", "\n", comment_less_code).strip()
        if "self.post_init(" in comment_less_code and i < len(new_body) - 1:
            # Remove it and add it again at the end
            new_body.pop(i)
            new_body.append(node)
            break
    return new_body
Fix the location of the `post_init()` in the new body, if we added statements after the call to `super()` (it needs to be the very last statement called)
_fix_post_init_location
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def _fix_init_location(self, new_body):
    """Fix the location of the `super().__init__()` in the new body, if we had new statements before it."""
    start_index = 0
    for i, node in enumerate(new_body):
        if m.matches(node, DOCSTRING_NODE) and i == start_index:
            start_index += 1
            continue
        code = self.python_module.code_for_node(node)
        comment_less_code = re.sub(r"#.*", "", code).strip()
        comment_less_code = re.sub(r"\ *\n", "\n", comment_less_code).strip()
        if "super().__init__" in comment_less_code and i > start_index:
            # Remove it and add it again at the top after the docstrings
            node = new_body.pop(i)
            new_body = new_body[:start_index] + [node] + new_body[start_index:]
            break
    return new_body
Fix the location of the `super().__init__()` in the new body, if we had new statements before it.
_fix_init_location
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def replace_super_calls(self, node: cst.IndentedBlock, func_name: str) -> cst.CSTNode:
    """Updates the body of the input `node`'s `func_name` function by replacing calls
    to super().func_name() with the source code of the parent class' `func_name`.
    It keeps everything that is defined before `super().func_name()`.
    """
    self.has_docstring = False
    parent_has_docstring = False
    if func_name in self.original_methods:
        parent_has_docstring = m.matches(self.original_methods[func_name].body.body[0], DOCSTRING_NODE)
    new_body = []
    has_super_call = False

    for i, expr in enumerate(node.body):
        if is_call_to_super(expr, func_name):
            has_super_call = True
            new_body.extend(self.update_body(self.original_methods[func_name].body.body, node.body[i + 1 :]))
            new_body = self._fix_init_location(new_body)
        else:
            expr = expr.visit(self.transformer)
        if m.matches(expr, DOCSTRING_NODE):
            self.has_docstring = True
            if parent_has_docstring:  # actually here we ought to de-duplicate?
                original_docstring = self.original_methods[func_name].body.body[0].body[0].value.value
                updated_docstring = expr.body[0].value.value
                merged_doc = merge_docstrings(original_docstring, updated_docstring)
                new_node = [expr.with_changes(body=[cst.Expr(value=cst.SimpleString(value=merged_doc))])]
            else:
                new_node = [expr]
            new_body.extend(new_node)
        elif not m.matches(expr, m.SimpleStatementLine(body=[m.Del()])) and not has_super_call:
            new_body.append(expr)
    if not self.has_docstring and parent_has_docstring:
        new_body = [self.original_methods[func_name].body.body[0]] + new_body
    return node.with_changes(body=new_body)
Updates the body of the input `node`'s `func_name` function by replacing calls to super().func_name() with the source code of the parent class' `func_name`. It keeps everything that is defined before `super().func_name()`.
replace_super_calls
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def leave_Return(self, original_node: cst.Return, updated_node: cst.Return) -> cst.CSTNode:
    """When a return statement is reached, it is replaced with the unrolled super code."""
    if m.matches(updated_node.value, m.Call(func=m.Attribute(attr=m.Name("super")))):
        func_def = self.get_metadata(ParentNodeProvider, original_node)
        if m.matches(func_def, m.FunctionDef()) and func_def.name.value in self.original_methods:
            updated_return_value = updated_node.value.with_changes(
                args=[
                    cst.Arg(
                        value=cst.Call(func=cst.Name("super"), args=[cst.Arg(value=cst.Name(func_def.name.value))])
                    )
                ]
            )
            return updated_node.with_changes(value=updated_return_value)
    return updated_node
"When a return statement is reached, it is replaced with the unrolled super code
leave_Return
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def find_all_dependencies(
    dependency_mapping: Dict[str, set],
    start_entity: Optional[str] = None,
    initial_dependencies: Optional[set] = None,
    initial_checked_dependencies: Optional[set] = None,
    return_parent: bool = False,
) -> Union[list, set]:
    """Return all the dependencies of the given `start_entity` or `initial_dependencies`. This is basically some kind
    of BFS traversal algorithm. It can either start from `start_entity`, or `initial_dependencies`.

    Args:
        dependency_mapping (`Dict[str, set]`):
            A mapping from entities (usually function/assignment names) to immediate dependencies. That is, for
            function names, a mapping {"foo": {"bar", "test"}} would indicate that functions `bar` and `test` are
            immediately called in `foo`'s definition.
        start_entity (str | None, *optional*):
            A key of `dependency_mapping`, indicating from which entity to start the search.
        initial_dependencies (set | None, *optional*):
            If `start_entity` is not provided, this can be used as an alternative. In this case, the search will
            continue from all the entities in `initial_dependencies`, if they are in `dependency_mapping`.
        initial_checked_dependencies (set | None, *optional*):
            If provided, entities already present in `initial_checked_dependencies` will not be part of the returned
            dependencies.
        return_parent (bool, *optional*):
            If `True`, will return a list consisting of tuples (dependency, parent) instead of a simple set of
            dependencies. Note that the order of the items in the list reflects the traversal order. Thus, no parent
            can ever appear before its children.

    Returns:
        A set of all the dependencies, or a list of tuples `(dependency, parent)` if `return_parent=True`.

    Example:
        Given the following structure in the `modular_xxx.py` file:
        ```
        def foo1():
            pass

        def foo2():
            pass

        def bar():
            foo1()

        def foobar():
            bar()
            foo2()

        class MyLayer(SomeOtherModelLayer):
            def forward(...):
                foobar()
        ```
        and the `dependency_mapping` created when visiting the `modular_xxx.py` file, we get:
        ```
        dependency_mapping = {'bar': {'foo1'}, 'foobar': {'bar', 'foo2'}}
        find_all_dependencies(dependency_mapping, start_entity='foobar', return_parent=True)
        >>> [('bar', 'foobar'), ('foo2', 'foobar'), ('foo1', 'bar')]
        ```
        That is, all the functions needed (and potentially their immediate parent) so that the function to be added
        in MyLayer (`foobar`) can work correctly.
    """
    if initial_dependencies is None and start_entity is not None:
        initial_dependencies = dependency_mapping[start_entity]
    if initial_checked_dependencies is None:
        initial_checked_dependencies = set()

    dependency_queue = deque(initial_dependencies)
    all_dependencies = set()
    all_dependencies_with_parent = []
    checked_dependencies = set(initial_checked_dependencies)
    parents = dict.fromkeys(initial_dependencies, start_entity)
    while len(dependency_queue) > 0:
        # Pick element to visit
        current = dependency_queue.popleft()
        if current not in checked_dependencies:
            # Add the dependencies
            all_dependencies.add(current)
            all_dependencies_with_parent += [(current, parents[current])]
            if current in dependency_mapping.keys():
                # Update dependency queue
                dependency_queue.extend(dependency_mapping[current])
                parents.update(dict.fromkeys(dependency_mapping[current], current))
            # Add visited node to the list
            checked_dependencies.add(current)

    if not return_parent:
        return all_dependencies
    # no child can ever appear before its parent thanks to the queue (needed to add them at the correct location in the body later)
    return all_dependencies_with_parent
Return all the dependencies of the given `start_entity` or `initial_dependencies`. This is basically some kind of
BFS traversal algorithm. It can either start from `start_entity`, or `initial_dependencies`.

Args:
    dependency_mapping (`Dict[str, set]`):
        A mapping from entities (usually function/assignment names) to immediate dependencies. That is, for function
        names, a mapping {"foo": {"bar", "test"}} would indicate that functions `bar` and `test` are immediately
        called in `foo`'s definition.
    start_entity (str | None, *optional*):
        A key of `dependency_mapping`, indicating from which entity to start the search.
    initial_dependencies (set | None, *optional*):
        If `start_entity` is not provided, this can be used as an alternative. In this case, the search will continue
        from all the entities in `initial_dependencies`, if they are in `dependency_mapping`.
    initial_checked_dependencies (set | None, *optional*):
        If provided, entities already present in `initial_checked_dependencies` will not be part of the returned
        dependencies.
    return_parent (bool, *optional*):
        If `True`, will return a list consisting of tuples (dependency, parent) instead of a simple set of
        dependencies. Note that the order of the items in the list reflects the traversal order. Thus, no parent can
        ever appear before its children.

Returns:
    A set of all the dependencies, or a list of tuples `(dependency, parent)` if `return_parent=True`.

Example:
    Given the following structure in the `modular_xxx.py` file:
    ```
    def foo1():
        pass

    def foo2():
        pass

    def bar():
        foo1()

    def foobar():
        bar()
        foo2()

    class MyLayer(SomeOtherModelLayer):
        def forward(...):
            foobar()
    ```
    and the `dependency_mapping` created when visiting the `modular_xxx.py` file, we get:
    ```
    dependency_mapping = {'bar': {'foo1'}, 'foobar': {'bar', 'foo2'}}
    find_all_dependencies(dependency_mapping, start_entity='foobar', return_parent=True)
    >>> [('bar', 'foobar'), ('foo2', 'foobar'), ('foo1', 'bar')]
    ```
    That is, all the functions needed (and potentially their immediate parent) so that the function to be added in
    MyLayer (`foobar`) can work correctly.
find_all_dependencies
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def dependencies_for_class_node(node: cst.ClassDef, global_names: set[str]) -> set:
    """Create immediate dependencies for a class node based on the `global_names`."""
    temp_module = cst.Module(body=[node])
    visitor = ClassDependencyMapper(node.name.value, global_names)
    temp_module.visit(visitor)
    return visitor.dependencies
Create immediate dependencies for a class node based on the `global_names`.
dependencies_for_class_node
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def augmented_dependencies_for_class_node(
    node: cst.ClassDef, mapper: "ModuleMapper", objects_imported_from_modeling: Optional[set[str]] = None
) -> set:
    """Create augmented dependencies for a class node based on a `mapper`. Augmented dependencies are the immediate
    dependencies plus the recursive function and assignment dependencies.
    """
    temp_module = cst.Module(body=[node])
    visitor = ClassDependencyMapper(node.name.value, set(mapper.global_nodes.keys()), objects_imported_from_modeling)
    temp_module.visit(visitor)
    return mapper.augment_dependencies(visitor.dependencies)
Create augmented dependencies for a class node based on a `mapper`. Augmented dependencies are the immediate dependencies plus the recursive function and assignment dependencies.
augmented_dependencies_for_class_node
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def visit_ImportFrom(self, node):
    """This keeps track of objects imported from neighbor modeling files (e.g. in `modeling_xxx.py`, we have
    `from .configuration_xxx import Config`, then `Config` should be recorded as it is not a dependency that needs
    to be added (because it will be part of the imports)"""
    import_module = self.python_module.code_for_node(node.module)
    import_statement = "." * len(node.relative) + import_module
    if re.search(rf"^\.({self.match_patterns})_.*", import_statement):
        for imported_object in node.names:
            # If an alias is present, we record it and not the original name
            if imported_object.evaluated_alias is not None:
                self.objects_imported_from_modeling.add(imported_object.evaluated_alias)
            else:
                self.objects_imported_from_modeling.add(imported_object.evaluated_name)
This keeps track of objects imported from neighbor modeling files (e.g. in `modeling_xxx.py`, we have `from .configuration_xxx import Config`, then `Config` should be recorded as it is not a dependency that needs to be added (because it will be part of the imports)
visit_ImportFrom
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def visit_SimpleStatementLine(self, node):
    """
    Global assigns like `GEMMA_INPUT_DOCSTRING = 'THIS IS THE INPUT'` and all import statements are extracted and
    saved in their corresponding dict. They are then used when updating dependency mappings.
    """
    parent_node = self.get_metadata(cst.metadata.ParentNodeProvider, node)
    simple_top_level_assign_structure = m.SimpleStatementLine(
        body=[m.Assign(targets=[m.AssignTarget(target=m.Name())])]
    )
    if m.matches(parent_node, m.Module()):
        if m.matches(node, simple_top_level_assign_structure):
            left_hand_side = node.body[0].targets[0].target.value
            self.current_assignment = left_hand_side
            self.assignments[left_hand_side] = node
        elif m.matches(node, m.SimpleStatementLine(body=[m.Import() | m.ImportFrom()])):
            self.imports.append(node)
Global Assigns like `GEMMA_INPUT_DOCSTRING = 'THIS IS THE INPUT'` and all import statements are extracted and saved in their corresponding dict. They are then used when updating dependency mappings.
visit_SimpleStatementLine
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
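The matcher pattern used above is worth seeing in isolation: it only fires on simple module-level assignments to a bare name, which is why tuple unpacking is not recorded. A small check, assuming only `libcst`:

```python
import libcst as cst
import libcst.matchers as m

pattern = m.SimpleStatementLine(body=[m.Assign(targets=[m.AssignTarget(target=m.Name())])])
simple = cst.parse_module("GEMMA_INPUT_DOCSTRING = 'THIS IS THE INPUT'").body[0]
unpacking = cst.parse_module("a, b = 1, 2").body[0]
print(m.matches(simple, pattern))     # True
print(m.matches(unpacking, pattern))  # False (target is a Tuple, not a Name)
```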
def visit_ClassDef(self, node: ClassDef) -> None:
    """Record class nodes to create their dependencies at the end."""
    self.classes[node.name.value] = node
    self.current_class = node.name.value
Record class nodes to create their dependencies at the end.
visit_ClassDef
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def visit_Name(self, node: cst.Name):
    """This is used to create a mapping from module-scope functions and assignments to objects used inside them."""
    if self.current_function is not None:
        self.object_dependency_mapping[self.current_function].add(node.value)
    if self.current_assignment is not None:
        self.object_dependency_mapping[self.current_assignment].add(node.value)
This is used to create a mapping from module-scope functions and assignments to objects used inside them.
visit_Name
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
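The Name-collection idea behind `visit_Name` can be reproduced standalone. This simplified visitor (no assignment tracking, no metadata, assuming only `libcst`) records every Name used inside a function; note that it over-collects, which is why the mapper later restricts dependencies to known entities:

```python
from collections import defaultdict

import libcst as cst

class NameCollector(cst.CSTVisitor):
    def __init__(self):
        self.current_function = None
        self.deps = defaultdict(set)

    def visit_FunctionDef(self, node: cst.FunctionDef) -> None:
        self.current_function = node.name.value

    def leave_FunctionDef(self, original_node: cst.FunctionDef) -> None:
        self.current_function = None

    def visit_Name(self, node: cst.Name) -> None:
        if self.current_function is not None:
            self.deps[self.current_function].add(node.value)

collector = NameCollector()
cst.parse_module("def foobar():\n    bar()\n    foo2()\n").visit(collector)
# {'foobar': {'foobar', 'bar', 'foo2'}} (set order varies); the function's own name is
# swept up too, and gets filtered out later against the known global entities
print(dict(collector.deps))
```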
def leave_Module(self, node):
    """When leaving the module, we store the position of each global scoped node to allow sorting the dependencies
    based on their position in the code later. We use the PositionProvider metadata wrapper for this.
    We also make sure to update `self.object_dependency_mapping` so that it contains only names recorded in
    `self.global_nodes`.
    """
    # Assign all nodes
    self.global_nodes = {**self.assignments, **self.classes, **self.functions}
    # Now sort the class dependency_mapping based on the position of the nodes
    self.start_lines = {}
    for id, node in self.global_nodes.items():
        self.start_lines[id] = self.get_metadata(cst.metadata.PositionProvider, node).start.line
When leaving the module, we store the position of each global scoped node to allow sorting the dependencies based on their position in the code later. We use the PositionProvider metadata wrapper for this. We also make sure to update `self.object_dependency_mapping` so that it contains only names recorded in `self.global_nodes`.
leave_Module
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def _restrict_dependencies_to_known_entities(self):
    """Since we added every Name as part of `self.object_dependency_mapping`, we need to remove those that are not
    part of the recorded objects in `self.global_nodes` (i.e. built-in variables, imports, etc). This should be
    called only after all merging operations have been finalized!"""
    global_objects = set(self.global_nodes.keys())
    for object_name, dependencies in self.object_dependency_mapping.items():
        self.object_dependency_mapping[object_name] = {dep for dep in dependencies if dep in global_objects}
Since we added every Name as part of `self.object_dependency_mapping`, we need to remove those that are not part of the recorded objects in `self.global_nodes` (i.e. built-in variables, imports, etc). This should be called only after all merging operations have been finalized!!
_restrict_dependencies_to_known_entities
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def _compute_recursive_object_dependencies(self) -> dict[str, set]:
    """Based on the immediate dependency mapping, create the recursive dependency mapping. For example, given the
    following file:
    ```
    def foo():
        pass

    def bar():
        foo()

    def test():
        bar()
    ```
    this visitor can only record immediate dependencies, i.e. it will record the following
    `self.object_dependency_mapping = {"test": {"bar"}, "bar": {"foo"}}`. This function is used to create the
    recursive mapping, i.e. `recursive_dependencies = {"test": {"bar", "foo"}, "bar": {"foo"}}`.
    """
    recursive_dependencies = {}
    for object_name in self.object_dependency_mapping.keys():
        all_dependencies = find_all_dependencies(self.object_dependency_mapping, start_entity=object_name)
        recursive_dependencies[object_name] = all_dependencies
    return recursive_dependencies
Based on the immediate dependency mapping, create the recursive dependency mapping. For example, given the following file:
```
def foo():
    pass

def bar():
    foo()

def test():
    bar()
```
this visitor can only record immediate dependencies, i.e. it will record the following `self.object_dependency_mapping = {"test": {"bar"}, "bar": {"foo"}}`. This function is used to create the recursive mapping, i.e. `recursive_dependencies = {"test": {"bar", "foo"}, "bar": {"foo"}}`.
_compute_recursive_object_dependencies
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
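Since the docstring's example is easy to run, here is a self-contained transitive closure over that immediate mapping (the real method delegates the traversal to `find_all_dependencies`):

```python
from collections import deque

immediate = {"test": {"bar"}, "bar": {"foo"}}

def recursive_deps(mapping):
    result = {}
    for name in mapping:
        seen, queue = set(), deque(mapping[name])
        while queue:  # plain BFS over the immediate-dependency edges
            dep = queue.popleft()
            if dep not in seen:
                seen.add(dep)
                queue.extend(mapping.get(dep, ()))
        result[name] = seen
    return result

print(recursive_deps(immediate))  # {'test': {'bar', 'foo'}, 'bar': {'foo'}}
```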
def augment_dependencies(self, dependencies: set[str]) -> set[str]:
    """For a set of `dependencies`, augment them by adding all potential dependencies of the **functions** and
    **assignments** present in the `dependencies`.
    """
    new_dependencies = dependencies.copy()
    # Go through the set of dependencies
    for dep in tuple(dependencies):
        if dep in self.object_recursive_dependency_mapping.keys():
            new_dependencies.update(self.object_recursive_dependency_mapping[dep])
    return new_dependencies
For a set of `dependencies`, augment them by adding all potential dependencies of the **functions** and **assignments** present in the `dependencies`.
augment_dependencies
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def compute_class_dependencies(self):
    """For each visited class, find its dependencies based on visiting the current file + potential merged
    dependencies."""
    self.class_dependency_mapping = {}
    for class_name, class_node in self.classes.items():
        dependencies = dependencies_for_class_node(class_node, set(self.global_nodes.keys()))
        # Correctly augment class dependencies with all needed objects
        self.class_dependency_mapping[class_name] = self.augment_dependencies(dependencies)
For each visited class, find its dependencies based on visiting the current file + potential merged dependencies.
compute_class_dependencies
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def compute_relative_order(self, missing_dependencies: set[str]) -> dict[str, int]:
    """Compute in which relative order the `missing_dependencies` should appear when the nodes are added to the final
    file that will be created based on the modular.
    """
    relative_order = {}
    idx = 0

    classes = sorted(
        [dep for dep in tuple(missing_dependencies) if dep in self.classes], key=lambda x: self.start_lines[x]
    )
    # This is because for merged dependencies, we only have relative order in the other visited file, so we need
    # to track dependency order relative to a given class
    if len(classes) > 0 and not hasattr(self, "class_dependency_mapping"):
        raise ValueError("Cannot correctly find the relative order of the dependencies.")

    remaining_dependencies = missing_dependencies.copy()

    # Start by tracking relative order class by class
    for class_name in classes:
        class_dependencies = tuple(self.class_dependency_mapping[class_name] & remaining_dependencies)
        original_dependencies = []
        merged_dependencies = []
        # We need to differentiate between nodes that were already present (we can get relative order globally) and
        # nodes that were merged (we can get relative order only relative to the class the dependencies relate to)
        for class_dep in class_dependencies:
            if class_dep in self.start_lines:
                original_dependencies.append(class_dep)
            else:
                merged_dependencies.append(class_dep)
        # We need to sort deterministically before actual sorting, so that entries missing (i.e. with value 1e10)
        # will always get the same order independently of the system (they come from a set, which has no deterministic order)
        original_dependencies = sorted(original_dependencies, reverse=True)
        # Sort both lists according to the order in their respective files
        original_dependencies = sorted(original_dependencies, key=lambda x: self.start_lines.get(x, 1e10))
        merged_dependencies = sorted(merged_dependencies, key=lambda x: self.modular_file_start_lines[x])

        # Add all original nodes first, then merged ones
        for dep in original_dependencies + merged_dependencies:
            remaining_dependencies.remove(dep)
            relative_order[dep] = idx
            idx += 1
        # Add the class itself (it can sometimes already be present if the order of classes in the source file
        # does not make sense, i.e. a class is used somewhere before being defined like in `rt_detr`...)
        if class_name in remaining_dependencies:
            remaining_dependencies.remove(class_name)
            relative_order[class_name] = idx
            idx += 1

    # Now add what still remains
    remaining_dependencies = tuple(remaining_dependencies)
    original_dependencies = []
    merged_dependencies = []
    for dep in remaining_dependencies:
        if dep in self.modular_file_start_lines:
            merged_dependencies.append(dep)
        else:
            original_dependencies.append(dep)
    # We need to sort deterministically before actual sorting, so that entries missing (i.e. with value 1e10)
    # will always get the same order independently of the system (they come from a set, which has no deterministic order)
    original_dependencies = sorted(original_dependencies, reverse=True)
    # Sort both lists according to the order in their respective files
    original_dependencies = sorted(original_dependencies, key=lambda x: self.start_lines.get(x, 1e10))
    merged_dependencies = sorted(merged_dependencies, key=lambda x: self.modular_file_start_lines[x])

    # Add all original nodes first, then merged ones
    for dep in original_dependencies + merged_dependencies:
        relative_order[dep] = idx
        idx += 1

    return relative_order
Compute in which relative order the `missing_dependencies` should appear when the nodes are added to the final file that will be created based on the modular.
compute_relative_order
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
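The double-sort in `compute_relative_order` deserves a standalone note: Python's `sorted` is stable, so a deterministic pre-sort (reverse lexicographic) fixes the relative order of entries that all share the fallback key `1e10` and would otherwise come out in arbitrary set order. A toy illustration with made-up names:

```python
start_lines = {"foo": 10, "bar": 3}
deps = ["zeta", "foo", "alpha", "bar"]  # "zeta" and "alpha" have no recorded line -> key 1e10

deps = sorted(deps, reverse=True)  # deterministic tie-break for the unknowns
deps = sorted(deps, key=lambda x: start_lines.get(x, 1e10))  # stable sort keeps that order
print(deps)  # ['bar', 'foo', 'zeta', 'alpha']
```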
def _merge_functions(self, functions: dict[str, cst.CSTNode], object_mapping: dict[str, set]):
    """Update the global nodes and function dependency mapping with those from the modular file.

    Merging rule: if any function with the same name was redefined in the modular, use it and its dependencies
    instead of the original ones (this may mean adding new functions as well, if any redefined function uses a new
    one).
    """
    # Add/overwrite all needed function nodes and dependencies
    self.functions.update(functions)
    self.object_dependency_mapping.update(
        {obj: dep for obj, dep in object_mapping.items() if obj in functions.keys()}
    )
    # Add them to global nodes
    self.global_nodes.update(self.functions)
Update the global nodes and function dependency mapping with those from the modular file. Merging rule: if any function with the same name was redefined in the modular, use it and its dependencies instead of the original ones (this may mean adding new functions as well, if any redefined function uses a new one).
_merge_functions
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def _merge_assignments(self, assignments: dict[str, cst.CSTNode], object_mapping: dict[str, set]):
    """Update the global nodes with the assignments from the modular file.

    Merging rule: if any assignment with the same name was redefined in the modular, we use it and its dependencies
    ONLY if it matches a pattern in `ASSIGNMENTS_REGEX_TO_KEEP_IF_NOT_NONE` and its value is not None, or if it
    matches a pattern in `ASSIGNMENTS_REGEX_TO_KEEP`. Otherwise, we use the original value and dependencies. This
    rule was chosen to avoid having to rewrite the big docstrings.
    """
    for assignment, node in assignments.items():
        should_keep = any(re.search(pattern, assignment) for pattern in ASSIGNMENTS_REGEX_TO_KEEP)
        should_keep_if_not_none = any(
            re.search(pattern, assignment) for pattern in ASSIGNMENTS_REGEX_TO_KEEP_IF_NOT_NONE
        ) and not (hasattr(node.body[0].value, "value") and node.body[0].value.value == "None")
        if should_keep or should_keep_if_not_none or assignment not in self.assignments:
            self.assignments[assignment] = node
            if assignment in object_mapping:
                self.object_dependency_mapping[assignment] = object_mapping[assignment]
    # Add them to global nodes
    self.global_nodes.update(self.assignments)
Update the global nodes with the assignments from the modular file. Merging rule: if any assignment with the same name was redefined in the modular, we use it and its dependencies ONLY if it matches a pattern in `ASSIGNMENTS_REGEX_TO_KEEP_IF_NOT_NONE` and its value is not None, or if it matches a pattern in `ASSIGNMENTS_REGEX_TO_KEEP`. Otherwise, we use the original value and dependencies. This rule was chosen to avoid having to rewrite the big docstrings.
_merge_assignments
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def _merge_classes(self, classes: dict[str, cst.CSTNode]):
    """Update the global nodes with the new classes from the modular (i.e. classes which do not exist in the current
    file, and are not imported). We do NOT update any dependency mapping here. This is because we only need the
    names of newly defined classes in the modular to be discoverable when computing dependencies for new nodes later
    on. For this reason, we do not add the new classes to `self.classes`, but only to `global_nodes`.
    """
    # Add all new class nodes that are neither already present nor imported
    self.global_nodes.update(
        {
            name: node
            for name, node in classes.items()
            if name not in self.classes and name not in self.objects_imported_from_modeling
        }
    )
Update the global nodes with the new classes from the modular (i.e. classes which do not exist in current file, and are not imported). We do NOT update any dependency mapping here. This is because we only need the names of newly defined classes in the modular to be discoverable when computing dependencies for new nodes later on. For this reason, we do not add the new classes to `self.classes`, but only to `global_nodes`.
_merge_classes
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def merge_modular_dependencies(self, classes, functions, assignments, object_mapping, start_lines):
    """Merge classes, functions and assignments from the modular definitions into the current module file, then
    record the relative order of all nodes.
    Note: This function takes care of updating `global_nodes` and `object_recursive_dependency_mapping` as well
    after the merge with the other files' dependencies.
    """
    self._merge_functions(functions, object_mapping)
    self._merge_assignments(assignments, object_mapping)
    self._merge_classes(classes)
    self.modular_file_start_lines = start_lines

    # Restrict the dependency mappings to the known entities to avoid Python's built-ins and imports
    self._restrict_dependencies_to_known_entities()

    # Create the global mapping of recursive dependencies for functions and assignments
    self.object_recursive_dependency_mapping = self._compute_recursive_object_dependencies()
Merge classes, functions and assignments from the modular definitions into the current module file, then record the relative order of all nodes. Note: This function takes care of updating `global_nodes` and `object_recursive_dependency_mapping` as well after the merge with the other files' dependencies.
merge_modular_dependencies
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def common_partial_suffix(str1: str, str2: str) -> str:
    """Return the longest common suffix between 2 strings. If one string is a full suffix of the other string,
    we do not consider it a common suffix and return `""`."""
    common_suffix = ""
    for i in range(1, min(len(str1), len(str2)) + 1):
        if str1[-i] == str2[-i]:
            common_suffix = str1[-i] + common_suffix
        else:
            break
    # We do not allow full string suffix
    if common_suffix == str1 or common_suffix == str2:
        common_suffix = ""
    return common_suffix
Return the longest common suffix between 2 strings. If one string is a full suffix of the other string, we do not consider it a common suffix and return `""`.
common_partial_suffix
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
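The suffix rules are easiest to pin down with a few checks. The algorithm is repeated here so the asserts run standalone; the example strings are invented:

```python
def common_partial_suffix(str1: str, str2: str) -> str:
    common_suffix = ""
    for i in range(1, min(len(str1), len(str2)) + 1):
        if str1[-i] == str2[-i]:
            common_suffix = str1[-i] + common_suffix
        else:
            break
    # A full-string suffix does not count as "partial"
    if common_suffix in (str1, str2):
        return ""
    return common_suffix

assert common_partial_suffix("GemmaModel", "LlamaModel") == "maModel"  # character-wise, not token-wise
assert common_partial_suffix("Model", "LlamaModel") == ""              # full-string suffix rejected
assert common_partial_suffix("Foo", "Bar") == ""                       # no overlap at all
```

Note the comparison runs character by character from the end, so the shared suffix of `GemmaModel` and `LlamaModel` is `maModel`, not just `Model`.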
def replace_class_node(
    mapper: ModelFileMapper, class_node: cst.ClassDef, renamed_super_class: str, original_super_class: str
):
    """
    Replace a class node which inherits from another modeling class. This function works in the following way:
    - start from the base class node of the inherited class (a cst.Node)
    - replace all methods of the base node with the methods defined in the child class
    - append all new methods defined in the child class
    - replace all calls to super() with the unravelled code

                    |    ```python                       |               |    ```python
                    |    class GemmaModel(LlamaModel):   |               |    class GemmaModel(nn.Module):
                    |        def __init__(self):         |               |        def __init__(self):
    Going from:     |            super().__init__()      |      to:      |            super().__init__(config)
                    |            self.dropout = 0.2      |               |            self.dropout = 0.2
                    |    ```                             |               |            self.padding_idx = config.pad_token_id
                                                                         |            self.vocab_size = config.vocab_size
                                                                         |            self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
                                                                         |            self.layers = nn.ModuleList(
                                                                         |                [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
                                                                         |            )
                                                                         |            self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
                                                                         |            self.gradient_checkpointing = False
                                                                         |            # Initialize weights and apply final processing
                                                                         |            self.post_init()
                                                                         |    ```
    """
    all_bases = [get_full_attribute_name(k.value) for k in class_node.bases]
    if any(base is None for base in all_bases):
        raise ValueError(f"Could not parse the name of the bases for {class_node.name.value}")

    original_node = mapper.classes[renamed_super_class]
    # Always use the new name of the class (in case we use e.g. `ColPaliForRetrieval` inheriting from `PaliGemmaForConditionalGeneration`)
    new_name = class_node.name

    # If the new class name is different from the renamed super class name, we need to update the docstrings/comments accordingly
    if new_name.value != renamed_super_class:
        common_suffix = common_partial_suffix(new_name.value, renamed_super_class)
        # Note that this works even without common prefix, in which case it does not replace anything
        old, new = renamed_super_class.replace(common_suffix, ""), new_name.value.replace(common_suffix, "")
        temp_module = cst.Module(body=[original_node])
        original_node = temp_module.visit(
            ReplaceNameTransformer(get_lowercase_name(old), get_lowercase_name(new), only_doc=True)
        ).body[0]

    # If we explicitly passed a new base with common suffix to an old base, it is for switching the prefix
    # e.g. if the "natural" parent class is `PreTrainedModel` but we wanted to rename it to `PreTrainedVisionModel`
    additional_bases = [base for base in all_bases if base != original_super_class]
    new_bases = []
    for original_base in original_node.bases:
        new_base = original_base
        # we only potentially switch base for Name-based bases, not Attribute
        if m.matches(original_base.value, m.Name()):
            original_base_name = original_base.value.value
            for additional_base_name in additional_bases:
                suffix = common_partial_suffix(original_base_name, additional_base_name)
                if len(suffix) > 0 and suffix[0].isupper():
                    new_name_node = original_base.value.with_changes(value=additional_base_name)
                    new_base = original_base.with_changes(value=new_name_node)
                    break
        new_bases.append(new_base)

    original_methods = {
        f.name.value if hasattr(f, "name") else mapper.python_module.code_for_node(f): f
        for f in original_node.body.body
    }
    updated_methods = {
        f.name.value if hasattr(f, "name") else mapper.python_module.code_for_node(f): f for f in class_node.body.body
    }
    end_meth = []

    assign_targets = {}
    docstring_node = []
    # Iterate directly from node.body as there can be property/setters with same names which are overwritten when we
    # use a dict
    for func in original_node.body.body:
        name = func.name.value if hasattr(func, "name") else mapper.python_module.code_for_node(func)
        if m.matches(func, m.FunctionDef()) and name in updated_methods and updated_methods[name] is not None:
            new_params = updated_methods[name].params
            # Replace the method in the replacement class, preserving decorators
            kwarg_name = getattr(updated_methods[name].params, "star_kwarg", None)
            if kwarg_name and kwarg_name.name.value == "super_kwargs":
                parent_params = {k.name.value: k for k in func.params.params}
                parent_params.update({k.name.value: k for k in new_params.params[1:]})
                new_params = new_params.with_changes(
                    params=list(parent_params.values()), star_kwarg=func.params.star_kwarg
                )
            # Keep decorators in `modular_xxx.py` if any, else original decorators
            new_decorators = (
                updated_methods[name].decorators if len(updated_methods[name].decorators) > 0 else func.decorators
            )

            # Keep return annotation in `modular_xxx.py` if any, else original return annotation
            new_return_annotation = updated_methods[name].returns if updated_methods[name].returns else func.returns

            if not re.match(
                r"\ndef .*\(.*\):\n    raise.*Error\(.*",
                mapper.python_module.code_for_node(updated_methods[name]),
            ):
                func = func.with_changes(
                    body=updated_methods[name].body,
                    params=new_params,
                    decorators=new_decorators,
                    returns=new_return_annotation,
                )
            else:
                continue

        if m.matches(func, m.SimpleStatementLine(body=[m.Assign()])):
            target = mapper.python_module.code_for_node(func.body[0].targets[0])
            assign_targets[target] = func
        elif m.matches(func, m.SimpleStatementLine(body=[m.AnnAssign()])):
            target = mapper.python_module.code_for_node(func.body[0].target)
            assign_targets[target] = func
        elif m.matches(func, DOCSTRING_NODE):
            docstring_node = [func]
        else:
            end_meth.append(func)

    # Port new methods that are defined only in the modular file and append at the end
    for func in class_node.body.body:
        name = func.name.value if hasattr(func, "name") else mapper.python_module.code_for_node(func)
        if m.matches(func, DOCSTRING_NODE):  # This processes the docstring of the class!
            # Extract the original docstring
            updated_docstring = func.body[0].value.value
            if len(docstring_node) == 0:  # If the original docstring is empty, just create one from the updated one
                docstring_node = [
                    cst.SimpleStatementLine(body=[cst.Expr(value=cst.SimpleString(value=updated_docstring))])
                ]
            else:
                original_docstring = docstring_node[0].body[0].value.value
                merged_doc = merge_docstrings(original_docstring, updated_docstring)
                # Update the docstring in the original function
                docstring_node = [
                    docstring_node[0].with_changes(body=[cst.Expr(value=cst.SimpleString(value=merged_doc))])
                ]
        if name not in original_methods and func is not None and isinstance(func, cst.FunctionDef):
            end_meth.append(func)
        if m.matches(func, m.SimpleStatementLine(body=[m.Assign()])):
            # TODO: we only use single assign, might cause issues
            target = mapper.python_module.code_for_node(func.body[0].targets[0])
            assign_targets[target] = func
        if m.matches(func, m.SimpleStatementLine(body=[m.AnnAssign()])):
            target = mapper.python_module.code_for_node(func.body[0].target)
            assign_targets[target] = func
    end_meth = docstring_node + list(assign_targets.values()) + end_meth

    # Replace the calls to `super()` with the unrolled code
    result_node = original_node.with_changes(body=cst.IndentedBlock(body=end_meth))
    temp_module = cst.Module(body=[result_node])
    new_module = MetadataWrapper(temp_module)
    new_replacement_class = new_module.visit(
        SuperTransformer(temp_module, original_methods, updated_methods, all_bases)
    )
    new_replacement_body = new_replacement_class.body[0].body  # get the indented block

    # Use decorators redefined in `modular_xxx.py` if any
    new_decorators = class_node.decorators if len(class_node.decorators) > 0 else original_node.decorators

    return original_node.with_changes(
        body=new_replacement_body, decorators=new_decorators, bases=new_bases, name=new_name
    )
Replace a class node which inherits from another modeling class. This function works in the following way:
- start from the base class node of the inherited class (a cst.Node)
- replace all methods of the base node with the methods defined in the child class
- append all new methods defined in the child class
- replace all calls to super() with the unravelled code

                |    ```python                       |               |    ```python
                |    class GemmaModel(LlamaModel):   |               |    class GemmaModel(nn.Module):
                |        def __init__(self):         |               |        def __init__(self):
Going from:     |            super().__init__()      |      to:      |            super().__init__(config)
                |            self.dropout = 0.2      |               |            self.dropout = 0.2
                |    ```                             |               |            self.padding_idx = config.pad_token_id
                                                                     |            self.vocab_size = config.vocab_size
                                                                     |            self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
                                                                     |            self.layers = nn.ModuleList(
                                                                     |                [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
                                                                     |            )
                                                                     |            self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
                                                                     |            self.gradient_checkpointing = False
                                                                     |            # Initialize weights and apply final processing
                                                                     |            self.post_init()
                                                                     |    ```
replace_class_node
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def find_file_type(class_name: str) -> str:
    """Based on a class name, find the file type corresponding to the class.
    If the class name is `LlamaConfig` it will return `configuration`.
    The list of suffixes is in `TYPE_TO_FILE_TYPE`. If there is no match, we match by default to `modeling`.
    """
    match_pattern = "|".join(TYPE_TO_FILE_TYPE.keys())
    match = re.search(rf"({match_pattern})$", class_name)
    if match:
        file_type = TYPE_TO_FILE_TYPE[match.group(1)]
    else:
        file_type = "modeling"
    return file_type
Based on a class name, find the file type corresponding to the class. If the class name is `LlamaConfig` it will return `configuration`. The list of suffixes is in `TYPE_TO_FILE_TYPE`. If there is no match, we match by default to `modeling`.
find_file_type
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
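A minimal sketch of the suffix dispatch; this `TYPE_TO_FILE_TYPE` subset is illustrative (the real mapping lives in the converter and covers more suffixes):

```python
import re

TYPE_TO_FILE_TYPE = {"Config": "configuration", "Tokenizer": "tokenization", "Processor": "processing"}

def find_file_type(class_name: str) -> str:
    # Match one of the known suffixes at the end of the class name
    match = re.search(rf"({'|'.join(TYPE_TO_FILE_TYPE.keys())})$", class_name)
    return TYPE_TO_FILE_TYPE[match.group(1)] if match else "modeling"

print(find_file_type("LlamaConfig"))     # configuration
print(find_file_type("LlamaTokenizer"))  # tokenization
print(find_file_type("LlamaModel"))      # modeling (the default)
```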
def append_new_import_node(
    node: cst.CSTNode, unused_imports: set[str], added_names: set, imports_to_keep: list[cst.CSTNode]
):
    """Insert the new `node` into the list of `imports_to_keep` in-place, if it is not part of the `unused_imports`
    or `added_names`. Also modifies `added_names` in-place accordingly."""
    import_node = node.body[0]
    names_to_keep = []
    for name in import_node.names:
        name_value = name.evaluated_alias or name.evaluated_name
        if name_value not in unused_imports and name_value not in added_names:
            names_to_keep.append(name.with_changes(comma=cst.MaybeSentinel.DEFAULT))
            added_names.add(name_value)
    if len(names_to_keep) > 0:
        new_node = node.with_changes(body=[import_node.with_changes(names=names_to_keep)])
        imports_to_keep.append(new_node)
Insert the new `node` into the list of `imports_to_keep` in-place, if it is not part of the `unused_imports` or `added_names`. Also modifies `added_names` in-place accordingly.
append_new_import_node
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def get_needed_imports(body: dict[str, dict], all_imports: list[cst.CSTNode]) -> list[cst.CSTNode]:
    """Get all the imports needed in the `body`, from the list of `all_imports`.
    `body` is a dict with the following structure `{str: {"insert_idx": int, "node": cst.CSTNode}}`.
    Note: we need to use `isinstance` on scope assignments, m.matches apparently does not work here yet!
    """
    new_body = [k[1]["node"] for k in sorted(body.items(), key=lambda x: x[1]["insert_idx"])]
    wrapper = MetadataWrapper(cst.Module(body=all_imports + new_body))
    scopes = set(wrapper.resolve(ScopeProvider).values())
    unused_imports = set()
    import_ref_count = defaultdict(lambda: 0)
    for scope in scopes:
        for assignment in scope.assignments:
            node = assignment.node
            if isinstance(assignment, cst.metadata.Assignment) and isinstance(node, (cst.Import, cst.ImportFrom)):
                ref_count = len(assignment.references)
                name = assignment.name
                import_ref_count[name] = max(ref_count, import_ref_count[name])
    # Similar imports may be redefined, and only used between their 1st and 2nd definition, so if we already have a
    # ref count > 0 at any point, the import is actually used
    unused_imports = {name for name, count in import_ref_count.items() if count <= 0 or name in body.keys()}

    imports_to_keep = []
    # We need to keep track of which names were already imported, because some imports may be duplicated from
    # multiple sources or be both protected and unprotected due to inconsistency between models
    added_names = set()
    existing_protected_statements = set()  # str repr of the import nodes - does not work with the nodes directly
    for node in all_imports:
        if m.matches(node, m.If()):  # handle safe imports
            new_statements = []
            for stmt_node in node.body.body:
                append_new_import_node(stmt_node, unused_imports, added_names, new_statements)
            new_statements = [stmt for stmt in new_statements if str(stmt) not in existing_protected_statements]
            if len(new_statements) > 0:
                new_node = node.with_changes(body=node.body.with_changes(body=new_statements))
                imports_to_keep.append(new_node)
                existing_protected_statements.update({str(stmt) for stmt in new_statements})
        else:
            append_new_import_node(node, unused_imports, added_names, imports_to_keep)

    protected_import_nodes = [node for node in imports_to_keep if m.matches(node, m.If())]
    usual_import_nodes = [node for node in imports_to_keep if not m.matches(node, m.If())]
    # Protected imports always appear at the end of all imports
    return usual_import_nodes + protected_import_nodes
Get all the imports needed in the `body`, from the list of `all_imports`. `body` is a dict with the following structure `{str: {"insert_idx": int, "node": cst.CSTNode}}`. Note: we need to use `isinstance` on scope assignments, m.matches apparently does not work here yet!
get_needed_imports
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
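The unused-import detection at the core of `get_needed_imports` can be demonstrated on its own with libcst's `ScopeProvider`, independent of the converter's extra bookkeeping:

```python
import libcst as cst
from libcst.metadata import MetadataWrapper, ScopeProvider

wrapper = MetadataWrapper(cst.parse_module("import os\nimport re\n\nprint(re.escape('a'))\n"))
unused = set()
for scope in set(wrapper.resolve(ScopeProvider).values()):
    for assignment in scope.assignments:
        # Only import bindings count, mirroring the isinstance checks above
        if isinstance(assignment, cst.metadata.Assignment) and isinstance(
            assignment.node, (cst.Import, cst.ImportFrom)
        ):
            if len(assignment.references) == 0:
                unused.add(assignment.name)
print(unused)  # {'os'}
```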
def split_all_assignment(node: cst.CSTNode) -> dict[str, cst.CSTNode]:
    """Split the `__all__` assignment found in the modular among the corresponding files."""
    all_all_per_file = {}
    assign_node = node.body[0]
    if isinstance(assign_node.value, cst.List):
        # Extract the elements from the list
        all_all_to_add = defaultdict(list)
        for element in assign_node.value.elements:
            if isinstance(element.value, cst.SimpleString):
                # Remove quotes and add the string to the elements list
                class_name = element.value.value
                file = find_file_type(element.value.evaluated_value)
                all_all_to_add[file] += [class_name]
        for file, new_alls in all_all_to_add.items():
            new_node = assign_node.with_changes(
                value=cst.List(elements=[cst.Element(value=cst.SimpleString(value=k)) for k in new_alls])
            )
            all_all_per_file[file] = node.with_changes(body=[new_node])
    return all_all_per_file
Split the `__all__` assignment found in the modular among the corresponding files.
split_all_assignment
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
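To see what `split_all_assignment` operates on, this snippet parses a modular-style `__all__` and extracts the class names it would dispatch (the dispatch result shown in the comment is conceptual):

```python
import libcst as cst

module = cst.parse_module('__all__ = ["LlamaConfig", "LlamaModel"]')
statement = module.body[0]  # the cst.SimpleStatementLine wrapping the Assign
names = [el.value.evaluated_value for el in statement.body[0].value.elements]
print(names)  # ['LlamaConfig', 'LlamaModel']
# split_all_assignment(statement) would then group these by find_file_type, conceptually:
#   {"configuration": '__all__ = ["LlamaConfig"]', "modeling": '__all__ = ["LlamaModel"]'}
```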
def visit_ImportFrom(self, node: cst.ImportFrom) -> None:
    """When visiting imports from modeling files (i.e. `transformers.models.xxx`) we get the code, parse it, and
    save it in `self.model_specific_modules` to later visit. The imported objects are saved in
    `self.model_specific_imported_objects`.
    """
    import_module = self.python_module.code_for_node(node.module)
    import_statement = "." * len(node.relative) + import_module
    if any(import_to_skip in import_statement for import_to_skip in IMPORTS_TO_SKIP_IN_MODULAR):
        return
    if m.matches(node.module, m.Attribute()):
        for imported_ in node.names:
            _import = re.search(
                rf"(?:transformers\.models\.)|(?:\.\.)\w+\.({self.match_patterns})_.*", import_statement
            )
            if _import:
                source = _import.group(1)
                if source == "modeling" and "Config" in self.python_module.code_for_node(imported_):
                    raise ValueError(
                        f"You are importing {self.python_module.code_for_node(imported_)} from the modeling file. Import from the `configuration_xxxx.py` file instead"
                    )
                if import_module not in self.model_specific_modules:
                    if "models" not in import_module:
                        import_module = "models." + import_module
                    if "transformers" not in import_module:
                        import_module = "transformers." + import_module
                    source_code = get_module_source_from_name(import_module)
                    tree = cst.parse_module(source_code)
                    self.model_specific_modules[import_module] = tree
                imported_object = self.python_module.code_for_node(imported_.name)
                self.model_specific_imported_objects[imported_object] = import_module
    if m.matches(node.module, m.Name()):
        if "transformers" == import_module:
            raise ValueError(
                f"You are importing from {import_module} directly using global imports. Import from the correct local path"
            )
When visiting imports from modeling files (i.e. `transformers.models.xxx`) we get the code, parse it, and save it in `self.model_specific_modules` to later visit. The imported objects are saved in `self.model_specific_imported_objects`.
visit_ImportFrom
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def visit_SimpleStatementLine(self, node):
    """If we visit an import statement not previously visited, record it. If we visit a module-scope assignment,
    simply record it or, if it is `__all__`, split it between files where we should dispatch it.
    """
    parent_node = self.get_metadata(cst.metadata.ParentNodeProvider, node)
    simple_top_level_assign_structure = m.SimpleStatementLine(
        body=[m.Assign(targets=[m.AssignTarget(target=m.Name())])]
    )
    if m.matches(parent_node, m.Module()):
        if m.matches(node, m.SimpleStatementLine(body=[m.Import()])):
            self.imports.append(node)
        elif m.matches(node, m.SimpleStatementLine(body=[m.ImportFrom()])):
            import_module = self.python_module.code_for_node(node.body[0].module)
            import_statement = "." * len(node.body[0].relative) + import_module
            if not (
                re.search(rf"(?:transformers\.models\.)|(?:\.\.)\w+\.({self.match_patterns})_.*", import_statement)
                and not any(import_to_skip in import_statement for import_to_skip in IMPORTS_TO_SKIP_IN_MODULAR)
            ):
                self.imports.append(node)
        elif m.matches(node, simple_top_level_assign_structure):
            assigned_variable = node.body[0].targets[0].target.value
            # __all__ is treated differently and not added to general assignments
            if assigned_variable == "__all__":
                self.all_all_to_add = split_all_assignment(node)
            else:
                self.current_assignment = assigned_variable
                self.assignments[assigned_variable] = node
If we visit an import statement not previously visited, record it. If we visit a module-scope assignment, simply record it or, if it is `__all__`, split it between files where we should dispatch it.
visit_SimpleStatementLine
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def leave_Module(self, node): """When we leave the modular file, we do the following in order: 1. for each modeling file found in the imports, rename it with the new model name, visit it, and update its dependency graph with the new function and assignment definitions found in the modular 2. update the modular dependency graph with the imported functions and assignments (found when visiting the matching files) 3. compute the nested (recursive) function and assignment dependencies """ # Takes care of finalizing our visit super().leave_Module(node) # 1. for each modeling file found in the imports, rename it with the new model name, visit it, and update dependencies self.visited_modules = {} self.renamers = {} name_prefixes = self.infer_new_model_name() for file, module in self.model_specific_modules.items(): file_model_name = file.split(".")[-2] new_name = name_prefixes[file] renamer = ReplaceNameTransformer(file_model_name, new_name, self.model_name) renamed_module = module.visit(renamer) self.visited_modules[file] = ModelFileMapper.visit_and_merge_dependencies( renamed_module, self.classes, self.functions, self.assignments, self.object_dependency_mapping, self.start_lines, ) # We record it so that we can rename classes later the exact same way self.renamers[file] = renamer # 2. in turn, we need to add the imported functions/assignments to the dependencies of the modular mapper, using the # definitions found in the visited files self.merge_model_specific_imports(self.visited_modules) # 3. compute the nested (recursive) function and assignment dependencies self.object_recursive_dependency_mapping = self._compute_recursive_object_dependencies() # We need to keep track of which objects were imported directly into which modeling file to not add them wrongly later # Note that we may visit several of the same file types, thus we save them per file type, not file self.imported_objects_per_file = defaultdict(set) for file, mapper in self.visited_modules.items(): file_type = re.search(rf"^transformers\.models\.\w+\.({self.match_patterns})_.*", file).group(1) self.imported_objects_per_file[file_type].update(mapper.objects_imported_from_modeling)
When we leave the modular file, we do the following in order: 1. for each modeling file found in the imports, rename it with the new model name, visit it, and update its dependency graph with the new function and assignment definitions found in the modular 2. update the modular dependency graph with the imported functions and assignments (found when visiting the matching files) 3. compute the nested (recursive) function and assignment dependencies
leave_Module
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def merge_model_specific_imports(self, visited_modules):
    """Merge the functions and assignments imported from the modeling files to the modular nodes and dependency
    graph, based on the visited files."""
    self.start_lines_file_mapping = {}
    self.added_objects_file_mapping = {}
    for object_name, file in self.model_specific_imported_objects.items():
        visited_module = visited_modules[file]
        self.start_lines_file_mapping[file] = visited_module.start_lines
        # Add functions and their dependencies
        if object_name in visited_module.functions and object_name not in self.functions:
            self.functions[object_name] = visited_module.functions[object_name]
            self.added_objects_file_mapping[object_name] = file
            dependencies = visited_module.object_dependency_mapping.get(object_name, None)
            if dependencies is not None:
                self.object_dependency_mapping[object_name] = dependencies
                for dep in dependencies:
                    if dep not in self.global_nodes:
                        self.added_objects_file_mapping[dep] = file
                        self.functions[dep] = visited_module.global_nodes[dep]

            # Add/overwrite the imported functions to other visited modules as well, in case it is absent/different
            # in the modeling source file of the inherited class. See `examples/modular-transformers/modular_switch_function.py`
            # and `examples/modular-transformers/modular_add_function.py` for examples
            recursive_dependencies = visited_module.object_recursive_dependency_mapping.get(object_name, set())
            node_recursive_dependencies_mapping = {
                dep: visited_module.global_nodes[dep] for dep in recursive_dependencies
            }
            for filename, module_mapper in self.visited_modules.items():
                if filename != file:
                    module_mapper.global_nodes[object_name] = visited_module.functions[object_name]
                    if len(recursive_dependencies) > 0:
                        module_mapper.object_recursive_dependency_mapping[object_name] = recursive_dependencies
                        module_mapper.global_nodes.update(node_recursive_dependencies_mapping)

        # Add assignments and their dependencies
        elif object_name in visited_module.assignments and object_name not in self.assignments:
            self.assignments[object_name] = visited_module.assignments[object_name]
            self.added_objects_file_mapping[object_name] = file
            dependencies = visited_module.object_dependency_mapping.get(object_name, None)
            if dependencies is not None:
                self.object_dependency_mapping[object_name] = dependencies
                for dep in dependencies:
                    if dep not in self.global_nodes:
                        self.added_objects_file_mapping[dep] = file
                        self.assignments[dep] = visited_module.global_nodes[dep]

    # Do not forget to re-assign all nodes after the merge
    self.global_nodes = {**self.assignments, **self.classes, **self.functions}
    # And restrict dependencies to those nodes only
    self._restrict_dependencies_to_known_entities()
Merge the functions and assignments imported from the modeling files to the modular nodes and dependency graph, based on the visited files.
merge_model_specific_imports
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def compute_relative_order(self, missing_dependencies: set) -> dict[str, int]:
    """Compute in which relative order the `missing_dependencies` should appear when the nodes are added to the
    final file that will be created based on the modular.
    """
    relative_order = {}
    idx = 0

    original_dependencies = []
    other_files_dependencies = defaultdict(list)
    for dep in tuple(missing_dependencies):
        if dep in self.added_objects_file_mapping:
            file = self.added_objects_file_mapping[dep]
            other_files_dependencies[file].append(dep)
        else:
            original_dependencies.append(dep)
    # Sort all lists according to the order in their respective file
    all_dependencies = []
    for file, dependencies in other_files_dependencies.items():
        sorted_dependencies = sorted(dependencies, key=lambda x: self.start_lines_file_mapping[file][x])
        all_dependencies += sorted_dependencies
    all_dependencies += sorted(original_dependencies, key=lambda x: self.start_lines[x])

    # Merged dependencies are added first (one file at a time), then the original ones
    for dep in all_dependencies:
        relative_order[dep] = idx
        idx += 1

    return relative_order
Compute in which relative order the `missing_dependencies` should appear when the nodes are added to the final file that will be created based on the modular.
compute_relative_order
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def infer_new_model_name(self) -> dict:
    """Infer whether we are using a model name prefix different from the usual model name as defined from the filename.
    This is useful e.g. when we define a new multi-modal model, and only the text part inherits from `LlamaModel`,
    so we have something like:
    ```python
    class NewModelNameTextDecoderLayer(LlamaDecoderLayer):
        pass
    ```
    with the `Text` prefix added to the model name.
    However, in case multiple prefixes are used, we raise a warning and use the most frequent prefix, to avoid parsing
    the same file multiple times and inconsistencies in the objects added from dependencies.
    If the new prefix collides with a prefix of another class in the file where we are importing from, then we also
    raise a warning, and use the default prefix (model name) to avoid collisions in dependencies.
    """
    prefix_model_name_mapping = defaultdict(Counter)
    cased_default_name = get_cased_name(self.model_name)
    # Iterate over all new classes to get modeling super classes
    for class_name, class_node in self.classes.items():
        modeling_bases = [
            k.value.value for k in class_node.bases if k.value.value in self.model_specific_imported_objects
        ]
        if len(modeling_bases) > 1:
            raise ValueError(
                f"{class_name} was defined with more than 1 model-specific super class. This is unsupported. We found {(*modeling_bases,)}."
            )
        if len(modeling_bases) == 1:
            filename = self.model_specific_imported_objects[modeling_bases[0]]
            cased_model_name = cased_default_name  # the default name prefix
            suffix = common_partial_suffix(class_name, modeling_bases[0])
            if len(suffix) > 0 and suffix[0].isupper():
                cased_model_name = class_name.replace(suffix, "")
                # If both the old model and new model share the last part of their name, it is detected as a common
                # suffix, but it should not be the case -> use the full name in this case
                if len(cased_model_name) < len(cased_default_name) and cased_default_name in class_name:
                    cased_model_name = cased_default_name
            prefix_model_name_mapping[filename].update([cased_model_name])

    # Check if we found multiple prefixes for some modeling files
    final_name_mapping = {}
    for file, prefixes_counter in prefix_model_name_mapping.items():
        if len(prefixes_counter) > 1:
            _, total = prefixes_counter.most_common(1)[0]
            most_used_entities = [name for name, count in prefixes_counter.most_common() if count == total]
            # if the default name is in the pool of equally used prefixes, use it, otherwise last encountered
            final_name = cased_default_name if cased_default_name in most_used_entities else most_used_entities[-1]
        else:
            final_name = list(prefixes_counter)[0]
        # Check if the prefix can be used without collisions in the names
        old_cased_model_name = get_cased_name(file.split(".")[-2])
        old_model_name_prefix = final_name.replace(cased_default_name, old_cased_model_name)
        # Raise adequate warning depending on the situation
        has_prefix_collision = f"\nclass {old_model_name_prefix}" in get_module_source_from_name(file)
        if final_name != cased_default_name and has_prefix_collision:
            if len(prefixes_counter) > 1:
                logger.warning(
                    f"We detected multiple prefix names when inheriting from {file}: {(*set(prefixes_counter),)}. However, the "
                    f"most used one, '{final_name}', is already present in the source file and will likely cause consistency "
                    f"issues. For this reason we fallback to the default prefix '{cased_default_name}' when grabbing args "
                    "and dependencies. Make sure to subclass the intermediate classes with the prefix you want (if different "
                    f"from '{cased_default_name}') or use a single prefix in all the modular (best)."
                )
            else:
                logger.warning(
                    f"We detected the use of the new default prefix {final_name} when inheriting from {file}. However, it is "
                    "already present in the source file and will likely cause consistency issues. For this reason we fallback "
                    f"to the default prefix '{cased_default_name}' when grabbing args and dependencies. Make sure to subclass "
                    f"the intermediate classes with the prefix you want (if different from '{cased_default_name}')"
                )
            final_name = cased_default_name
        elif len(prefixes_counter) > 1:
            logger.warning(
                f"We detected multiple prefix names when inheriting from {file}: {(*set(prefixes_counter),)}. We will only "
                f"use the most used '{final_name}' prefix when grabbing args and dependencies. Make sure to subclass the "
                f"intermediate classes with the prefix you want (if different from '{final_name}') or use a single prefix "
                "in all the modular (best)."
            )
        final_name_mapping[file] = get_lowercase_name(final_name)

    # Check we are not missing imported files
    for file in self.model_specific_modules.keys():
        if file not in final_name_mapping.keys():
            final_name_mapping[file] = self.model_name

    return final_name_mapping
Infer whether we are using a model name prefix different from the usual model name as defined from the filename. This is useful e.g. when we define a new multi-modal model, and only the text part inherits from `LlamaModel`, so we have something like: ```python class NewModelNameTextDecoderLayer(LlamaDecoderLayer): pass ``` with the `Text` prefix added to the model name. However, in case multiple prefixes are used, we raise a warning and use the most frequent prefix, to avoid parsing the same file multiple times and inconsistencies in the objects added from dependencies. If the new prefix collides with a prefix of another class in the file where we are importing from, then we also raise a warning, and use the default prefix (model name) to avoid collisions in dependencies.
infer_new_model_name
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
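The suffix-matching step above relies on a `common_partial_suffix` helper defined elsewhere in this file. A rough standalone sketch of the idea, with a simplified stand-in for that helper (the stand-in and the class names below are illustrative, not the real implementation):

```python
# Simplified stand-in: longest common suffix of two class names.
def common_partial_suffix_sketch(a: str, b: str) -> str:
    suffix = ""
    for char_a, char_b in zip(reversed(a), reversed(b)):
        if char_a != char_b:
            break
        suffix = char_a + suffix
    return suffix


class_name = "NewModelNameTextDecoderLayer"
super_class = "LlamaDecoderLayer"
suffix = common_partial_suffix_sketch(class_name, super_class)  # "DecoderLayer"
if suffix and suffix[0].isupper():
    # Strip the shared suffix to recover the (possibly extended) prefix
    print(class_name.replace(suffix, ""))  # NewModelNameText
```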
def check_dependencies_and_create_import_node( file_type: str, new_dependencies: set[str], mapper: ModuleMapper, new_name: str ) -> tuple[set[str], dict[str, cst.CSTNode]]: """Check that all class nodes in the `new_dependencies` belong to the correct `file_type`. If this is not the case, we need to remove it from the dependencies, and create a new import to it instead. This scenario may appear in the following case: If a new class in the `modular_xxx.py` file does not belong to `type_xxx.py`, but is used somewhere in `other_type_xxx.py` (e.g. as a type hint), but none of the visited files had a similar class, then it would be imported in `type_xxx.py` as part of the standard dependency graph (because we never encountered an import towards this new class in any file). For example imagine the following `modular.py`: ``` from ..llama.modeling_llama import LlamaModel class NewNameTextConfig(PretrainedConfig): ... class NewNameConfig(PretrainedConfig): ... class NewNameModel(LlamaModel): config = NewNameConfig() text_config = NewNameTextConfig() ... ``` then without the help of this function, `NewNameTextConfig` would be imported in the `modeling_newname.py` as well as `configuration_newname.py`, because `modeling_llama.py` tells us to not import `NewNameConfig`, but has no knowledge of `NewNameTextConfig`. """ class_dependencies = {dep for dep in new_dependencies if m.matches(mapper.global_nodes[dep], m.ClassDef())} corrected_dependencies = new_dependencies.copy() new_imports = {} for class_name in class_dependencies: class_file_type = find_file_type(class_name) # In this case, we need to remove it from the dependencies and create a new import instead if class_file_type != file_type: corrected_dependencies.remove(class_name) import_statement = f"from .{class_file_type}_{new_name} import {class_name}" new_imports[class_name] = cst.parse_statement(import_statement) return corrected_dependencies, new_imports
Check that all class nodes in the `new_dependencies` belong to the correct `file_type`. If this is not the case, we need to remove it from the dependencies, and create a new import to it instead. This scenario may appear in the following case: If a new class in the `modular_xxx.py` file does not belong to `type_xxx.py`, but is used somewhere in `other_type_xxx.py` (e.g. as a type hint), but none of the visited files had a similar class, then it would be imported in `type_xxx.py` as part of the standard dependency graph (because we never encountered an import towards this new class in any file). For example imagine the following `modular.py`: ``` from ..llama.modeling_llama import LlamaModel class NewNameTextConfig(PretrainedConfig): ... class NewNameConfig(PretrainedConfig): ... class NewNameModel(LlamaModel): config = NewNameConfig() text_config = NewNameTextConfig() ... ``` then without the help of this function, `NewNameTextConfig` would be imported in the `modeling_newname.py` as well as `configuration_newname.py`, because `modeling_llama.py` tells us to not import `NewNameConfig`, but has no knowledge of `NewNameTextConfig`.
check_dependencies_and_create_import_node
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
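A small sketch of how such a replacement import node can be built with `cst.parse_statement`; the class and model names below are hypothetical:

```python
import libcst as cst

# Hypothetical names for illustration: a config class that must be imported
# into the modeling file rather than redefined there.
class_name = "NewNameTextConfig"
class_file_type = "configuration"
new_name = "newname"

import_statement = f"from .{class_file_type}_{new_name} import {class_name}"
node = cst.parse_statement(import_statement)
print(type(node).__name__)           # SimpleStatementLine
print(cst.Module(body=[node]).code)  # from .configuration_newname import NewNameTextConfig
```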
def get_class_node_and_dependencies(
    modular_mapper: ModularFileMapper, class_name: str, node: cst.CSTNode, files: dict[str, dict]
) -> tuple[dict, str, dict]:
    """Return a single class node (and all its dependency nodes), to be added to the `files`. It creates the new
    class node based on the inherited classes if needed. Also returns any new imports of a new class defined in
    the modular that we may need.
    """
    # An exception was already raised if this has len > 1
    model_specific_bases = [
        k.value.value for k in node.bases if k.value.value in modular_mapper.model_specific_imported_objects
    ]
    super_class = model_specific_bases[0] if len(model_specific_bases) == 1 else None

    file_type = find_file_type(class_name)
    file_to_update = files[file_type]
    model_name = modular_mapper.model_name

    # This is used to avoid adding objects to the dependencies graph if they will be imported already
    imported_objects = modular_mapper.imported_objects_per_file[file_type]

    # We need to replace the class node with the transformers (modeling file) super class node
    if super_class is not None:
        super_file_name = modular_mapper.model_specific_imported_objects[super_class]

        # Get the mapper corresponding to the inherited class
        mapper = modular_mapper.visited_modules[super_file_name]
        # Rename the super class according to the exact same rule we used when renaming the whole module
        renamer = modular_mapper.renamers[super_file_name]
        renamed_super_class = preserve_case_replace(super_class, renamer.patterns, renamer.cased_new_name)

        # Create the new class node
        updated_node = replace_class_node(mapper, node, renamed_super_class, super_class)

        # Grab all immediate dependencies of the new node
        new_node_dependencies = augmented_dependencies_for_class_node(updated_node, mapper, imported_objects)

        # At this point, if any class dependency is found, but belongs to another file, it means that we need to remove
        # it from the dependencies, and add a new import of it instead
        new_node_dependencies, new_imports = check_dependencies_and_create_import_node(
            file_type, new_node_dependencies, mapper, model_name
        )

        # The node was modified -> look for all recursive dependencies of the new node
        all_dependencies_to_add = find_all_dependencies(
            dependency_mapping=mapper.class_dependency_mapping,
            initial_dependencies=new_node_dependencies,
            initial_checked_dependencies=set(file_to_update.keys()),
        )

        relative_dependency_order = mapper.compute_relative_order(all_dependencies_to_add)
        nodes_to_add = {
            dep: (relative_dependency_order[dep], mapper.global_nodes[dep]) for dep in all_dependencies_to_add
        }

    # No transformers (modeling file) super class, just check functions and assignments dependencies
    else:
        updated_node = node
        # The node was NOT modified -> no need to look recursively for other class dependencies. Indeed, even if they are not
        # already defined (which would mean a weird order of the code in the modular...), they will be in the future
        all_dependencies_to_add = augmented_dependencies_for_class_node(updated_node, modular_mapper, imported_objects)

        # At this point, if any class dependency is found, but belongs to another file, it means that we need to remove
        # it from the dependencies, and add a new import of it instead
        all_dependencies_to_add, new_imports = check_dependencies_and_create_import_node(
            file_type, all_dependencies_to_add, modular_mapper, model_name
        )

        relative_dependency_order = modular_mapper.compute_relative_order(all_dependencies_to_add)
        nodes_to_add = {
            dep: (relative_dependency_order[dep], modular_mapper.global_nodes[dep])
            for dep in all_dependencies_to_add
            if dep not in file_to_update.keys()
        }

    # Add the class node itself to the nodes to add
    class_idx = max(relative_dependency_order.values()) + 1 if len(relative_dependency_order) > 0 else 0
    nodes_to_add[class_name] = (class_idx, updated_node)

    return nodes_to_add, file_type, new_imports
Return a single class node (and all its dependency nodes), to be added to the `files`. It creates the new class node based on the inherited classes if needed. Also returns any new imports of a new class defined in the modular that we may need.
get_class_node_and_dependencies
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def create_modules(modular_mapper: ModularFileMapper) -> dict[str, cst.Module]:
    """Create all the new modules based on visiting the modular file. It replaces all classes as necessary."""
    files = defaultdict(dict)
    current_file_indices = defaultdict(lambda: 0)

    # For each class defined in modular, potentially replace the node and add it with its dependencies
    for class_name, node in modular_mapper.classes.items():
        nodes_to_add, file_type, new_imports = get_class_node_and_dependencies(modular_mapper, class_name, node, files)

        # Add the potential new imports that we may need to the `modular_mapper` variable
        modular_mapper.imported_objects_per_file[file_type].update(new_imports.keys())
        modular_mapper.imports.extend(list(new_imports.values()))

        # Sort the nodes according to their relative order
        nodes_to_add = sorted(nodes_to_add.items(), key=lambda x: x[1][0])
        # Write all nodes to file
        for dependency, (_, node) in nodes_to_add:
            # This is used to keep certain variables at the beginning of the file
            try:
                # The -1000 is arbitrary -> just keep it bigger than the list
                idx = -1000 + VARIABLES_AT_THE_BEGINNING.index(dependency)
            except ValueError:
                idx = current_file_indices[file_type]
                current_file_indices[file_type] += 1
            files[file_type][dependency] = {"insert_idx": idx, "node": node}

    # Add the __all__ statement to files at the end
    for file_type, node in modular_mapper.all_all_to_add.items():
        idx = current_file_indices[file_type]
        files[file_type]["__all__"] = {"insert_idx": idx, "node": node}

    # Aggregate all the imports statements (we look for duplicates with the code_for_node, not the nodes themselves because
    # they are wrapped in SimpleStatementLine or If which could have different newlines, blanks etc)
    all_imports = modular_mapper.imports.copy()
    all_imports_code = {modular_mapper.python_module.code_for_node(node).strip() for node in all_imports}
    for file, mapper in modular_mapper.visited_modules.items():
        new_imports = [
            node for node in mapper.imports if mapper.python_module.code_for_node(node).strip() not in all_imports_code
        ]
        new_imports_code = {mapper.python_module.code_for_node(node).strip() for node in new_imports}
        all_imports.extend(new_imports)
        all_imports_code.update(new_imports_code)

    # Find the correct imports, and write the new modules
    for file, body in files.items():
        new_body = [k[1]["node"] for k in sorted(body.items(), key=lambda x: x[1]["insert_idx"])]
        needed_imports = get_needed_imports(body, all_imports)
        full_module = needed_imports + new_body
        new_module = cst.Module(body=full_module, header=modular_mapper.python_module.header)
        files[file] = new_module

    return files
Create all the new modules based on visiting the modular file. It replaces all classes as necessary.
create_modules
python
huggingface/transformers
utils/modular_model_converter.py
https://github.com/huggingface/transformers/blob/master/utils/modular_model_converter.py
Apache-2.0
def get_reply_blocks(self, job_name, job_result, failures, device, text): """ failures: A list with elements of the form {"line": full test name, "trace": error trace} """ # `text` must be less than 3001 characters in Slack SDK # keep some room for adding "[Truncated]" when necessary MAX_ERROR_TEXT = 3000 - len("[Truncated]") failure_text = "" for idx, error in enumerate(failures): new_text = failure_text + f"*{error['line']}*\n_{error['trace']}_\n\n" if len(new_text) > MAX_ERROR_TEXT: # `failure_text` here has length <= 3000 failure_text = failure_text + "[Truncated]" break # `failure_text` here has length <= MAX_ERROR_TEXT failure_text = new_text title = job_name if device is not None: title += f" ({device}-gpu)" content = {"type": "section", "text": {"type": "mrkdwn", "text": text}} # TODO: Make sure we always have a valid job link (or at least a way not to break the report sending) # Currently we get the device from a job's artifact name. # If a device is found, the job name should contain the device type, for example, `XXX (single-gpu)`. # This could be done by adding `machine_type` in a job's `strategy`. # (If `job_result["job_link"][device]` is `None`, we get an error: `... [ERROR] must provide a string ...`) if job_result["job_link"] is not None and job_result["job_link"][device] is not None: content["accessory"] = { "type": "button", "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True}, "url": job_result["job_link"][device], } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failure_text}}, ]
failures: A list with elements of the form {"line": full test name, "trace": error trace}
get_reply_blocks
python
huggingface/transformers
utils/notification_service.py
https://github.com/huggingface/transformers/blob/master/utils/notification_service.py
Apache-2.0
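The truncation loop above can be exercised in isolation; a minimal sketch with toy failure entries (the test names and traces are fabricated placeholders):

```python
# Toy failures list; the 3000-character limit mirrors Slack's section text cap.
failures = [
    {"line": f"tests/test_modeling_foo.py::test_{i}", "trace": "AssertionError: " + "x" * 400}
    for i in range(20)
]

MAX_ERROR_TEXT = 3000 - len("[Truncated]")
failure_text = ""
for error in failures:
    new_text = failure_text + f"*{error['line']}*\n_{error['trace']}_\n\n"
    if len(new_text) > MAX_ERROR_TEXT:
        failure_text = failure_text + "[Truncated]"
        break
    failure_text = new_text

print(len(failure_text))  # always <= 3000
```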
def get_release_branch_name(): """Derive branch name from transformers version.""" major, minor, *_ = transformers.__version__.split(".") major = int(major) minor = int(minor) if minor == 0: # Handle major version rollback, e.g., from 5.0 to 4.latest (if ever needed) major -= 1 # You'll need logic to determine the last minor of the previous major version raise ValueError("Minor version is 0; need logic to find previous major version's last minor") else: minor -= 1 return f"v{major}.{minor}-release"
Derive branch name from transformers version.
get_release_branch_name
python
huggingface/transformers
utils/patch_helper.py
https://github.com/huggingface/transformers/blob/master/utils/patch_helper.py
Apache-2.0
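A quick sketch of the same derivation with a stubbed version string in place of `transformers.__version__` (the value is an assumption for illustration):

```python
version = "4.46.0"  # hypothetical current version

major, minor, *_ = version.split(".")
major, minor = int(major), int(minor)
if minor == 0:
    raise ValueError("Minor version is 0; need logic to find previous major version's last minor")
branch = f"v{major}.{minor - 1}-release"
print(branch)  # v4.45-release
```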
def get_prs_by_label(label): """Call gh CLI to get PRs with a specific label.""" cmd = [ "gh", "pr", "list", "--label", label, "--state", "all", "--json", "number,title,mergeCommit,url", "--limit", "100", ] result = subprocess.run(cmd, capture_output=True, text=True) result.check_returncode() prs = json.loads(result.stdout) for pr in prs: is_merged = pr.get("mergeCommit", {}) if is_merged: pr["oid"] = is_merged.get("oid") return prs
Call gh CLI to get PRs with a specific label.
get_prs_by_label
python
huggingface/transformers
utils/patch_helper.py
https://github.com/huggingface/transformers/blob/master/utils/patch_helper.py
Apache-2.0
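The `mergeCommit` handling can be shown without the GitHub CLI installed by stubbing the JSON payload `gh` would print; the PR entries below are fabricated:

```python
import json

# Stubbed `gh pr list --json number,title,mergeCommit,url` output.
stdout = json.dumps([
    {"number": 123, "title": "Fix thing", "mergeCommit": {"oid": "abc123"}, "url": "https://github.com/..."},
    {"number": 124, "title": "Open PR", "mergeCommit": None, "url": "https://github.com/..."},
])

prs = json.loads(stdout)
for pr in prs:
    is_merged = pr.get("mergeCommit", {})
    if is_merged:
        pr["oid"] = is_merged.get("oid")

print([pr.get("oid") for pr in prs])  # ['abc123', None] -> the second PR was never merged
```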
def count_lines(filepath): """Count the number of lines in a file.""" try: with open(filepath, "r") as f: return len(f.read().split("\n")) except FileNotFoundError: return 0
Count the number of lines in a file.
count_lines
python
huggingface/transformers
utils/process_test_artifacts.py
https://github.com/huggingface/transformers/blob/master/utils/process_test_artifacts.py
Apache-2.0
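One subtlety worth noting: splitting on `"\n"` counts a trailing newline as an extra empty line. A quick self-contained check:

```python
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("a\nb\nc\n")
    path = f.name

with open(path, "r") as f:
    # 4, not 3, because the trailing newline yields one extra empty element
    print(len(f.read().split("\n")))
```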
def compute_parallel_nodes(line_count, max_tests_per_node=10): """Compute the number of parallel nodes required.""" num_nodes = math.ceil(line_count / AVERAGE_TESTS_PER_NODES) if line_count < 4: return 1 return min(MAX_PARALLEL_NODES, num_nodes)
Compute the number of parallel nodes required.
compute_parallel_nodes
python
huggingface/transformers
utils/process_test_artifacts.py
https://github.com/huggingface/transformers/blob/master/utils/process_test_artifacts.py
Apache-2.0
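`AVERAGE_TESTS_PER_NODES` and `MAX_PARALLEL_NODES` are module-level constants not shown here; a sketch with assumed values (not the real configuration) to illustrate the behavior:

```python
import math

AVERAGE_TESTS_PER_NODES = 5  # assumed value for illustration
MAX_PARALLEL_NODES = 8       # assumed value for illustration

def compute_parallel_nodes_sketch(line_count, max_tests_per_node=10):
    num_nodes = math.ceil(line_count / AVERAGE_TESTS_PER_NODES)
    if line_count < 4:
        return 1
    return min(MAX_PARALLEL_NODES, num_nodes)

print(compute_parallel_nodes_sketch(3))    # 1 (tiny suites get a single node)
print(compute_parallel_nodes_sketch(25))   # 5
print(compute_parallel_nodes_sketch(500))  # 8 (capped at MAX_PARALLEL_NODES)
```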
def get_new_python_files(diff_with_last_commit=False) -> List[str]:
    """
    Return a list of python files that have been added between the current head and the main branch.

    Returns:
        `List[str]`: The list of python files added.
    """
    repo = Repo(PATH_TO_REPO)

    try:
        # For the cases where the main branch exists locally
        main = repo.refs.main
    except AttributeError:
        # On GitHub Actions runners, there is no local main branch
        main = repo.remotes.origin.refs.main

    if not diff_with_last_commit:
        print(f"main is at {main.commit}")
        print(f"Current head is at {repo.head.commit}")

        commits = repo.merge_base(main, repo.head)
        for commit in commits:
            print(f"Branching commit: {commit}")
    else:
        print(f"main is at {main.commit}")
        commits = main.commit.parents
        for commit in commits:
            print(f"Parent commit: {commit}")

    return get_new_python_files_between_commits(repo.head.commit, commits)
Return a list of python files that have been added between the current head and the main branch. Returns: `List[str]`: The list of python files added.
get_new_python_files
python
huggingface/transformers
utils/pr_slow_ci_models.py
https://github.com/huggingface/transformers/blob/master/utils/pr_slow_ci_models.py
Apache-2.0
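A minimal GitPython sketch of the merge-base lookup above, assuming it runs from inside a git checkout that has a `main` branch locally or on `origin`:

```python
from git import Repo

repo = Repo(".", search_parent_directories=True)
try:
    main = repo.refs.main
except AttributeError:
    main = repo.remotes.origin.refs.main

# The merge base is the commit the current branch forked from
for commit in repo.merge_base(main, repo.head):
    print(f"Branching commit: {commit}")
```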
def parse_message(message: str) -> str:
    """
    Parses a GitHub pull request's comment to find the models specified in it to run slow CI.

    Args:
        message (`str`): The body of a GitHub pull request's comment.

    Returns:
        `str`: The substring in `message` after `run-slow`, `run_slow` or `run slow`. If no such prefix is found,
        the empty string is returned.
    """
    if message is None:
        return ""

    message = message.strip().lower()

    # run-slow: model_1, model_2
    if not message.startswith(("run-slow", "run_slow", "run slow")):
        return ""
    message = message[len("run slow") :]
    # remove leading `:`
    while message.strip().startswith(":"):
        message = message.strip()[1:]

    return message
Parses a GitHub pull request's comment to find the models specified in it to run slow CI. Args: message (`str`): The body of a GitHub pull request's comment. Returns: `str`: The substring in `message` after `run-slow`, `run_slow` or `run slow`. If no such prefix is found, the empty string is returned.
parse_message
python
huggingface/transformers
utils/pr_slow_ci_models.py
https://github.com/huggingface/transformers/blob/master/utils/pr_slow_ci_models.py
Apache-2.0
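A few illustrative calls, assuming `parse_message` from above is in scope (note that leading whitespace after the stripped `:` survives):

```python
print(repr(parse_message("run-slow: bert, gpt2")))  # ' bert, gpt2'
print(repr(parse_message("Run slow: llama")))       # ' llama' (matching is case-insensitive)
print(repr(parse_message("please run the tests")))  # '' (no run-slow prefix)
```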
def update_version_in_file(fname: str, version: str, file_type: str): """ Update the version of Transformers in one file. Args: fname (`str`): The path to the file where we want to update the version. version (`str`): The new version to set in the file. file_type (`str`): The type of the file (should be a key in `REPLACE_PATTERNS`). """ with open(fname, "r", encoding="utf-8", newline="\n") as f: code = f.read() re_pattern, replace = REPLACE_PATTERNS[file_type] replace = replace.replace("VERSION", version) code = re_pattern.sub(replace, code) with open(fname, "w", encoding="utf-8", newline="\n") as f: f.write(code)
Update the version of Transformers in one file. Args: fname (`str`): The path to the file where we want to update the version. version (`str`): The new version to set in the file. file_type (`str`): The type of the file (should be a key in `REPLACE_PATTERNS`).
update_version_in_file
python
huggingface/transformers
utils/release.py
https://github.com/huggingface/transformers/blob/master/utils/release.py
Apache-2.0
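`REPLACE_PATTERNS` is defined at the top of `utils/release.py` and is not shown here; the sketch below uses an illustrative stand-in with the same shape (a compiled regex plus a template containing the literal `VERSION`):

```python
import re

# Illustrative stand-in for the real `REPLACE_PATTERNS` table; only the shape matters here.
REPLACE_PATTERNS_SKETCH = {
    "init": (
        re.compile(r'^__version__\s*=\s*"([^"]+)"$', re.MULTILINE),
        '__version__ = "VERSION"',
    ),
}

code = '__version__ = "4.46.0.dev0"\n'
re_pattern, replace = REPLACE_PATTERNS_SKETCH["init"]
replace = replace.replace("VERSION", "4.46.0")
print(re_pattern.sub(replace, code))  # __version__ = "4.46.0"
```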
def update_version_in_examples(version: str): """ Update the version in all examples files. Args: version (`str`): The new version to set in the examples. """ for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES): # Removing some of the folders with non-actively maintained examples from the walk if "legacy" in directories: directories.remove("legacy") for fname in fnames: if fname.endswith(".py"): update_version_in_file(os.path.join(folder, fname), version, file_type="examples")
Update the version in all examples files. Args: version (`str`): The new version to set in the examples.
update_version_in_examples
python
huggingface/transformers
utils/release.py
https://github.com/huggingface/transformers/blob/master/utils/release.py
Apache-2.0
def global_version_update(version: str, patch: bool = False): """ Update the version in all needed files. Args: version (`str`): The new version to set everywhere. patch (`bool`, *optional*, defaults to `False`): Whether or not this is a patch release. """ for pattern, fname in REPLACE_FILES.items(): update_version_in_file(fname, version, pattern) if not patch: # We don't update the version in the examples for patch releases. update_version_in_examples(version)
Update the version in all needed files. Args: version (`str`): The new version to set everywhere. patch (`bool`, *optional*, defaults to `False`): Whether or not this is a patch release.
global_version_update
python
huggingface/transformers
utils/release.py
https://github.com/huggingface/transformers/blob/master/utils/release.py
Apache-2.0
def remove_conversion_scripts(): """ Delete the scripts that convert models from older, unsupported formats. We don't want to include these in release wheels because they often have to open insecure file types (pickle, Torch .bin models). This results in vulnerability scanners flagging us and can cause compliance issues for users with strict security policies. """ model_dir = Path(PATH_TO_MODELS) for conversion_script in list(model_dir.glob("**/convert*.py")): conversion_script.unlink()
Delete the scripts that convert models from older, unsupported formats. We don't want to include these in release wheels because they often have to open insecure file types (pickle, Torch .bin models). This results in vulnerability scanners flagging us and can cause compliance issues for users with strict security policies.
remove_conversion_scripts
python
huggingface/transformers
utils/release.py
https://github.com/huggingface/transformers/blob/master/utils/release.py
Apache-2.0
def get_version() -> packaging.version.Version: """ Reads the current version in the main __init__. """ with open(REPLACE_FILES["init"], "r") as f: code = f.read() default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0] return packaging.version.parse(default_version)
Reads the current version in the main __init__.
get_version
python
huggingface/transformers
utils/release.py
https://github.com/huggingface/transformers/blob/master/utils/release.py
Apache-2.0
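How `packaging` interprets the version strings these release helpers juggle; a quick self-contained check:

```python
import packaging.version

v = packaging.version.parse("4.47.0.dev0")
print(v.is_devrelease)            # True
print(v.base_version)             # 4.47.0
print(v.major, v.minor, v.micro)  # 4 47 0
```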
def pre_release_work(patch: bool = False): """ Do all the necessary pre-release steps: - figure out the next minor release version and ask confirmation - update the version everywhere - clean-up the model list in the main README Args: patch (`bool`, *optional*, defaults to `False`): Whether or not this is a patch release. """ # First let's get the default version: base version if we are in dev, bump minor otherwise. default_version = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!") if default_version.is_devrelease: default_version = default_version.base_version elif patch: default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}" else: default_version = f"{default_version.major}.{default_version.minor + 1}.0" # Now let's ask nicely if we have found the right version. version = input(f"Which version are you releasing? [{default_version}]") if len(version) == 0: version = default_version print(f"Updating version to {version}.") global_version_update(version, patch=patch) print("Deleting conversion scripts.") remove_conversion_scripts()
Do all the necessary pre-release steps: - figure out the next minor release version and ask confirmation - update the version everywhere - clean-up the model list in the main README Args: patch (`bool`, *optional*, defaults to `False`): Whether or not this is a patch release.
pre_release_work
python
huggingface/transformers
utils/release.py
https://github.com/huggingface/transformers/blob/master/utils/release.py
Apache-2.0
def post_release_work(): """ Do all the necessary post-release steps: - figure out the next dev version and ask confirmation - update the version everywhere - clean-up the model list in the main README """ # First let's get the current version current_version = get_version() dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0" current_version = current_version.base_version # Check with the user we got that right. version = input(f"Which version are we developing now? [{dev_version}]") if len(version) == 0: version = dev_version print(f"Updating version to {version}.") global_version_update(version)
Do all the necessary post-release steps: - figure out the next dev version and ask confirmation - update the version everywhere - clean-up the model list in the main README
post_release_work
python
huggingface/transformers
utils/release.py
https://github.com/huggingface/transformers/blob/master/utils/release.py
Apache-2.0
def sort_auto_mapping(fname: str, overwrite: bool = False) -> Optional[bool]: """ Sort all auto mappings in a file. Args: fname (`str`): The name of the file where we want to sort auto-mappings. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to fix and overwrite the file. Returns: `Optional[bool]`: Returns `None` if `overwrite=True`. Otherwise returns `True` if the file has an auto-mapping improperly sorted, `False` if the file is okay. """ with open(fname, "r", encoding="utf-8") as f: content = f.read() lines = content.split("\n") new_lines = [] line_idx = 0 while line_idx < len(lines): if _re_intro_mapping.search(lines[line_idx]) is not None: # Start of a new mapping! indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8 while not lines[line_idx].startswith(" " * indent + "("): new_lines.append(lines[line_idx]) line_idx += 1 blocks = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": start_idx = line_idx while not lines[line_idx].startswith(" " * indent + ")"): line_idx += 1 blocks.append("\n".join(lines[start_idx : line_idx + 1])) else: blocks.append(lines[line_idx]) line_idx += 1 # Sort blocks by their identifiers blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0]) new_lines += blocks else: new_lines.append(lines[line_idx]) line_idx += 1 if overwrite: with open(fname, "w", encoding="utf-8") as f: f.write("\n".join(new_lines)) else: return "\n".join(new_lines) != content
Sort all auto mappings in a file. Args: fname (`str`): The name of the file where we want to sort auto-mappings. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to fix and overwrite the file. Returns: `Optional[bool]`: Returns `None` if `overwrite=True`. Otherwise returns `True` if the file has an auto-mapping improperly sorted, `False` if the file is okay.
sort_auto_mapping
python
huggingface/transformers
utils/sort_auto_mappings.py
https://github.com/huggingface/transformers/blob/master/utils/sort_auto_mappings.py
Apache-2.0
def sort_all_auto_mappings(overwrite: bool = False): """ Sort all auto mappings in the library. Args: overwrite (`bool`, *optional*, defaults to `False`): Whether or not to fix and overwrite the file. """ fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")] diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames] if not overwrite and any(diffs): failures = [f for f, d in zip(fnames, diffs) if d] raise ValueError( f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix" " this." )
Sort all auto mappings in the library. Args: overwrite (`bool`, *optional*, defaults to `False`): Whether or not to fix and overwrite the file.
sort_all_auto_mappings
python
huggingface/transformers
utils/sort_auto_mappings.py
https://github.com/huggingface/transformers/blob/master/utils/sort_auto_mappings.py
Apache-2.0
def clean_code(content: str) -> str:
    """
    Remove docstrings, empty lines and comments from some code (used to detect if a diff is real or only concerns
    comments or docstrings).

    Args:
        content (`str`): The code to clean

    Returns:
        `str`: The cleaned code.
    """
    # We need to deactivate autoformatting here to write escaped triple quotes (we cannot use real triple quotes or
    # this would mess up the result if this function applied to this particular file).
    # fmt: off
    # Remove docstrings by splitting on triple " then triple ':
    splits = content.split('\"\"\"')
    content = "".join(splits[::2])
    splits = content.split("\'\'\'")
    # fmt: on
    content = "".join(splits[::2])

    # Remove empty lines and comments
    lines_to_keep = []
    for line in content.split("\n"):
        # remove anything that is after a # sign.
        line = re.sub("#.*$", "", line)
        # remove white lines
        if len(line) != 0 and not line.isspace():
            lines_to_keep.append(line)
    return "\n".join(lines_to_keep)
Remove docstrings, empty lines and comments from some code (used to detect if a diff is real or only concerns comments or docstrings). Args: content (`str`): The code to clean Returns: `str`: The cleaned code.
clean_code
python
huggingface/transformers
utils/tests_fetcher.py
https://github.com/huggingface/transformers/blob/master/utils/tests_fetcher.py
Apache-2.0
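A short usage sketch, assuming `clean_code` from above is in scope:

```python
sample = (
    'def f():\n'
    '    """Docstring that should disappear."""\n'
    '    x = 1  # inline comment\n'
    '\n'
    '    return x\n'
)
print(clean_code(sample))
# def f():
#     x = 1
#     return x
```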
def keep_doc_examples_only(content: str) -> str:
    """
    Remove everything from the code content except the doc examples (used to determine whether a diff should trigger
    doc tests or not).

    Args:
        content (`str`): The code to clean

    Returns:
        `str`: The cleaned code.
    """
    # Keep doc examples only by splitting on triple "`"
    splits = content.split("```")
    # Add leading and trailing "```" so the navigation is easier when compared to the original input `content`
    content = "```" + "```".join(splits[1::2]) + "```"

    # Remove empty lines and comments
    lines_to_keep = []
    for line in content.split("\n"):
        # remove anything that is after a # sign.
        line = re.sub("#.*$", "", line)
        # remove white lines
        if len(line) != 0 and not line.isspace():
            lines_to_keep.append(line)
    return "\n".join(lines_to_keep)
Remove everything from the code content except the doc examples (used to determine whether a diff should trigger doc tests or not). Args: content (`str`): The code to clean Returns: `str`: The cleaned code.
keep_doc_examples_only
python
huggingface/transformers
utils/tests_fetcher.py
https://github.com/huggingface/transformers/blob/master/utils/tests_fetcher.py
Apache-2.0
def get_all_tests() -> List[str]:
    """
    Walks the `tests` folder to return a list of files/subfolders. This is used to split the tests to run when using
    parallelism. The split is:

    - folders under `tests` (`tokenization`, `pipelines`, etc), excluding the subfolder `models`.
    - folders under `tests/models`: `bert`, `gpt2`, etc.
    - test files under `tests`: `test_modeling_common.py`, `test_tokenization_common.py`, etc.
    """
    # test folders/files directly under `tests` folder
    tests = os.listdir(PATH_TO_TESTS)
    tests = [f"tests/{f}" for f in tests if "__pycache__" not in f]
    tests = sorted([f for f in tests if (PATH_TO_REPO / f).is_dir() or f.startswith("tests/test_")])

    # model specific test folders
    model_test_folders = os.listdir(PATH_TO_TESTS / "models")
    model_test_folders = [f"tests/models/{f}" for f in model_test_folders if "__pycache__" not in f]
    model_test_folders = sorted([f for f in model_test_folders if (PATH_TO_REPO / f).is_dir()])

    tests.remove("tests/models")
    # Sagemaker tests are not meant to be run on the CI.
    if "tests/sagemaker" in tests:
        tests.remove("tests/sagemaker")
    tests = model_test_folders + tests
    return tests
Walks the `tests` folder to return a list of files/subfolders. This is used to split the tests to run when using parallelism. The split is: - folders under `tests` (`tokenization`, `pipelines`, etc), excluding the subfolder `models`. - folders under `tests/models`: `bert`, `gpt2`, etc. - test files under `tests`: `test_modeling_common.py`, `test_tokenization_common.py`, etc.
get_all_tests
python
huggingface/transformers
utils/tests_fetcher.py
https://github.com/huggingface/transformers/blob/master/utils/tests_fetcher.py
Apache-2.0
def get_all_doctest_files() -> List[str]: """ Return the complete list of python and Markdown files on which we run doctest. At this moment, we restrict this to only take files from `src/` or `docs/source/en/` that are not in `utils/not_doctested.txt`. Returns: `List[str]`: The complete list of Python and Markdown files on which we run doctest. """ py_files = [str(x.relative_to(PATH_TO_REPO)) for x in PATH_TO_REPO.glob("**/*.py")] md_files = [str(x.relative_to(PATH_TO_REPO)) for x in PATH_TO_REPO.glob("**/*.md")] test_files_to_run = py_files + md_files # change to use "/" as path separator test_files_to_run = ["/".join(Path(x).parts) for x in test_files_to_run] # don't run doctest for files in `src/transformers/models/deprecated` test_files_to_run = [x for x in test_files_to_run if "models/deprecated" not in x] # only include files in `src` or `docs/source/en/` test_files_to_run = [x for x in test_files_to_run if x.startswith(("src/", "docs/source/en/"))] # not include init files test_files_to_run = [x for x in test_files_to_run if not x.endswith(("__init__.py",))] # These are files not doctested yet. with open("utils/not_doctested.txt") as fp: not_doctested = {x.split(" ")[0] for x in fp.read().strip().split("\n")} # So far we don't have 100% coverage for doctest. This line will be removed once we achieve 100%. test_files_to_run = [x for x in test_files_to_run if x not in not_doctested] return sorted(test_files_to_run)
Return the complete list of python and Markdown files on which we run doctest. At this moment, we restrict this to only take files from `src/` or `docs/source/en/` that are not in `utils/not_doctested.txt`. Returns: `List[str]`: The complete list of Python and Markdown files on which we run doctest.
get_all_doctest_files
python
huggingface/transformers
utils/tests_fetcher.py
https://github.com/huggingface/transformers/blob/master/utils/tests_fetcher.py
Apache-2.0
def extract_imports(module_fname: str, cache: Optional[Dict[str, List[str]]] = None) -> List[str]: """ Get the imports a given module makes. Args: module_fname (`str`): The name of the file of the module where we want to look at the imports (given relative to the root of the repo). cache (Dictionary `str` to `List[str]`, *optional*): To speed up this function if it was previously called on `module_fname`, the cache of all previously computed results. Returns: `List[str]`: The list of module filenames imported in the input `module_fname` (a submodule we import from that is a subfolder will give its init file). """ if cache is not None and module_fname in cache: return cache[module_fname] with open(PATH_TO_REPO / module_fname, "r", encoding="utf-8") as f: content = f.read() # Filter out all docstrings to not get imports in code examples. As before we need to deactivate formatting to # keep this as escaped quotes and avoid this function failing on this file. splits = content.split('\"\"\"') # fmt: skip content = "".join(splits[::2]) module_parts = str(module_fname).split(os.path.sep) imported_modules = [] # Let's start with relative imports relative_imports = _re_single_line_relative_imports.findall(content) relative_imports = [ (mod, imp) for mod, imp in relative_imports if "# tests_ignore" not in imp and imp.strip() != "(" ] multiline_relative_imports = _re_multi_line_relative_imports.findall(content) relative_imports += [(mod, imp) for mod, imp in multiline_relative_imports if "# tests_ignore" not in imp] # We need to remove parts of the module name depending on the depth of the relative imports. for module, imports in relative_imports: level = 0 while module.startswith("."): module = module[1:] level += 1 if len(module) > 0: dep_parts = module_parts[: len(module_parts) - level] + module.split(".") else: dep_parts = module_parts[: len(module_parts) - level] imported_module = os.path.sep.join(dep_parts) imported_modules.append((imported_module, [imp.strip() for imp in imports.split(",")])) # Let's continue with direct imports direct_imports = _re_single_line_direct_imports.findall(content) direct_imports = [(mod, imp) for mod, imp in direct_imports if "# tests_ignore" not in imp and imp.strip() != "("] multiline_direct_imports = _re_multi_line_direct_imports.findall(content) direct_imports += [(mod, imp) for mod, imp in multiline_direct_imports if "# tests_ignore" not in imp] # We need to find the relative path of those imports. for module, imports in direct_imports: import_parts = module.split(".")[1:] # ignore the name of the repo since we add it below. dep_parts = ["src", "transformers"] + import_parts imported_module = os.path.sep.join(dep_parts) imported_modules.append((imported_module, [imp.strip() for imp in imports.split(",")])) result = [] # Double check we get proper modules (either a python file or a folder with an init). for module_file, imports in imported_modules: if (PATH_TO_REPO / f"{module_file}.py").is_file(): module_file = f"{module_file}.py" elif (PATH_TO_REPO / module_file).is_dir() and (PATH_TO_REPO / module_file / "__init__.py").is_file(): module_file = os.path.sep.join([module_file, "__init__.py"]) imports = [imp for imp in imports if len(imp) > 0 and re.match("^[A-Za-z0-9_]*$", imp)] if len(imports) > 0: result.append((module_file, imports)) if cache is not None: cache[module_fname] = result return result
Get the imports a given module makes. Args: module_fname (`str`): The name of the file of the module where we want to look at the imports (given relative to the root of the repo). cache (Dictionary `str` to `List[str]`, *optional*): To speed up this function if it was previously called on `module_fname`, the cache of all previously computed results. Returns: `List[str]`: The list of module filenames imported in the input `module_fname` (a submodule we import from that is a subfolder will give its init file).
extract_imports
python
huggingface/transformers
utils/tests_fetcher.py
https://github.com/huggingface/transformers/blob/master/utils/tests_fetcher.py
Apache-2.0
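A standalone sketch of the relative-import resolution performed above, resolving `from ...utils import logging` as seen from a modeling file (the real code splits on `os.path.sep`; `/` is used here for brevity):

```python
# Resolve a relative import seen inside src/transformers/models/bert/modeling_bert.py
module_parts = "src/transformers/models/bert/modeling_bert.py".split("/")

module, level = "...utils", 0
while module.startswith("."):
    module = module[1:]
    level += 1

# Each leading dot strips one trailing path component (the first strips the filename)
dep_parts = module_parts[: len(module_parts) - level] + module.split(".")
print("/".join(dep_parts))  # src/transformers/utils
```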
def get_module_dependencies(module_fname: str, cache: Optional[Dict[str, List[str]]] = None) -> List[str]:
    """
    Refines the result of `extract_imports` to remove subfolders and get a proper list of module filenames: if a file
    has an import `from utils import Foo, Bar`, with `utils` being a subfolder containing many files, this will
    traverse the `utils` init file to check where those dependencies come from: for instance the files utils/foo.py
    and utils/bar.py.

    Warning: This presupposes that all intermediate inits are properly built (with imports from the respective
    submodules) and works better if objects are defined in submodules and not the intermediate init (otherwise the
    intermediate init is added, and inits usually have a lot of dependencies).

    Args:
        module_fname (`str`):
            The name of the file of the module where we want to look at the imports (given relative to the root of
            the repo).
        cache (Dictionary `str` to `List[str]`, *optional*):
            To speed up this function if it was previously called on `module_fname`, the cache of all previously
            computed results.

    Returns:
        `List[str]`: The list of module filenames imported in the input `module_fname` (with submodule imports
        refined).
    """
    dependencies = []
    imported_modules = extract_imports(module_fname, cache=cache)
    # The while loop is to recursively traverse all inits we may encounter: we will add things as we go.
    while len(imported_modules) > 0:
        new_modules = []
        for module, imports in imported_modules:
            if "models" in module.split("/") and module.split("/")[-1].startswith("convert_"):
                continue
            # If we end up in an __init__ we are often not actually importing from this init (except in the case where
            # the object is fully defined in the __init__)
            if module.endswith("__init__.py"):
                # So we get the imports from that init then try to find where our objects come from.
                new_imported_modules = dict(extract_imports(module, cache=cache))
                # Add imports via `define_import_structure` after #35167, as we removed explicit imports in `__init__.py`
                from transformers.utils.import_utils import define_import_structure

                new_imported_modules_from_import_structure = define_import_structure(PATH_TO_REPO / module)
                for mapping in new_imported_modules_from_import_structure.values():
                    for _module, _imports in mapping.items():
                        # Import Structure returns _module keys as import paths rather than local paths
                        # We replace with os.path.sep so that it's Windows-compatible
                        _module = _module.replace(".", os.path.sep)
                        _module = module.replace("__init__.py", f"{_module}.py")
                        if _module not in new_imported_modules:
                            new_imported_modules[_module] = list(_imports)
                        else:
                            original_imports = new_imported_modules[_module]
                            for potential_new_item in list(_imports):
                                if potential_new_item not in original_imports:
                                    new_imported_modules[_module].append(potential_new_item)

                for new_module, new_imports in new_imported_modules.items():
                    if any(i in new_imports for i in imports):
                        if new_module not in dependencies:
                            new_modules.append((new_module, [i for i in new_imports if i in imports]))
                        imports = [i for i in imports if i not in new_imports]
                if len(imports) > 0:
                    # If there are any objects left, they may be a submodule
                    path_to_module = PATH_TO_REPO / module.replace("__init__.py", "")
                    dependencies.extend(
                        [
                            os.path.join(module.replace("__init__.py", ""), f"{i}.py")
                            for i in imports
                            if (path_to_module / f"{i}.py").is_file()
                        ]
                    )
                    imports = [i for i in imports if not (path_to_module / f"{i}.py").is_file()]
                    if len(imports) > 0:
                        # Then if there are still objects left, they are fully defined in the init, so we keep it as a
                        # dependency.
                        dependencies.append(module)
            else:
                dependencies.append(module)

        imported_modules = new_modules
    return dependencies
Refines the result of `extract_imports` to remove subfolders and get a proper list of module filenames: if a file has an import `from utils import Foo, Bar`, with `utils` being a subfolder containing many files, this will traverse the `utils` init file to check where those dependencies come from: for instance the files utils/foo.py and utils/bar.py. Warning: This presupposes that all intermediate inits are properly built (with imports from the respective submodules) and works better if objects are defined in submodules and not the intermediate init (otherwise the intermediate init is added, and inits usually have a lot of dependencies). Args: module_fname (`str`): The name of the file of the module where we want to look at the imports (given relative to the root of the repo). cache (Dictionary `str` to `List[str]`, *optional*): To speed up this function if it was previously called on `module_fname`, the cache of all previously computed results. Returns: `List[str]`: The list of module filenames imported in the input `module_fname` (with submodule imports refined).
get_module_dependencies
python
huggingface/transformers
utils/tests_fetcher.py
https://github.com/huggingface/transformers/blob/master/utils/tests_fetcher.py
Apache-2.0
def create_reverse_dependency_tree() -> List[Tuple[str, str]]:
    """
    Create a list of all edges (a, b), each meaning that modifying a impacts b, going over all module and test files.
    """
    cache = {}
    all_modules = list(PATH_TO_TRANFORMERS.glob("**/*.py"))
    all_modules = [x for x in all_modules if not ("models" in x.parts and x.parts[-1].startswith("convert_"))]
    all_modules += list(PATH_TO_TESTS.glob("**/*.py"))
    all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules]
    edges = [(dep, mod) for mod in all_modules for dep in get_module_dependencies(mod, cache=cache)]

    return list(set(edges))
Create a list of all edges (a, b), each meaning that modifying a impacts b, going over all module and test files.
create_reverse_dependency_tree
python
huggingface/transformers
utils/tests_fetcher.py
https://github.com/huggingface/transformers/blob/master/utils/tests_fetcher.py
Apache-2.0
def get_tree_starting_at(module: str, edges: List[Tuple[str, str]]) -> List[Union[str, List[str]]]:
    """
    Returns the tree starting at a given module following all edges.

    Args:
        module (`str`): The module that will be the root of the subtree we want.
        edges (`List[Tuple[str, str]]`): The list of all edges of the tree.

    Returns:
        `List[Union[str, List[str]]]`: The tree to print in the following format: [module, [list of edges
        starting at module], [list of edges starting at the preceding level], ...]
    """
    vertices_seen = [module]
    new_edges = [edge for edge in edges if edge[0] == module and edge[1] != module and "__init__.py" not in edge[1]]
    tree = [module]
    while len(new_edges) > 0:
        tree.append(new_edges)
        final_vertices = list({edge[1] for edge in new_edges})
        vertices_seen.extend(final_vertices)
        new_edges = [
            edge
            for edge in edges
            if edge[0] in final_vertices and edge[1] not in vertices_seen and "__init__.py" not in edge[1]
        ]

    return tree
Returns the tree starting at a given module following all edges. Args: module (`str`): The module that will be the root of the subtree we want. edges (`List[Tuple[str, str]]`): The list of all edges of the tree. Returns: `List[Union[str, List[str]]]`: The tree to print in the following format: [module, [list of edges starting at module], [list of edges starting at the preceding level], ...]
get_tree_starting_at
python
huggingface/transformers
utils/tests_fetcher.py
https://github.com/huggingface/transformers/blob/master/utils/tests_fetcher.py
Apache-2.0
def print_tree_deps_of(module, all_edges=None):
    """
    Prints the tree of modules depending on a given module.

    Args:
        module (`str`): The module that will be the root of the subtree we want.
        all_edges (`List[Tuple[str, str]]`, *optional*):
            The list of all edges of the tree. Will be set to `create_reverse_dependency_tree()` if not passed.
    """
    if all_edges is None:
        all_edges = create_reverse_dependency_tree()
    tree = get_tree_starting_at(module, all_edges)

    # The list of lines is a list of tuples (line_to_be_printed, module)
    # Keeping the modules lets us know where to insert each new line in the list.
    lines = [(tree[0], tree[0])]
    for index in range(1, len(tree)):
        edges = tree[index]
        start_edges = {edge[0] for edge in edges}

        for start in start_edges:
            end_edges = {edge[1] for edge in edges if edge[0] == start}
            # We will insert all those edges just after the line showing start.
            pos = 0
            while lines[pos][1] != start:
                pos += 1
            lines = lines[: pos + 1] + [(" " * (2 * index) + end, end) for end in end_edges] + lines[pos + 1 :]

    for line in lines:
        # We don't print the refs that were just here to help build lines.
        print(line[0])
Prints the tree of modules depending on a given module. Args: module (`str`): The module that will be the root of the subtree we want. all_edges (`List[Tuple[str, str]]`, *optional*): The list of all edges of the tree. Will be set to `create_reverse_dependency_tree()` if not passed.
print_tree_deps_of
python
huggingface/transformers
utils/tests_fetcher.py
https://github.com/huggingface/transformers/blob/master/utils/tests_fetcher.py
Apache-2.0
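Usage sketch (illustrative module path, assuming the fetcher utilities are importable): building the edges once and passing them in avoids recomputing the full dependency tree for repeated queries.

```python
from tests_fetcher import create_reverse_dependency_tree, print_tree_deps_of

all_edges = create_reverse_dependency_tree()  # compute once, reuse for several queries
print_tree_deps_of("src/transformers/modeling_utils.py", all_edges=all_edges)
# Prints the root module first, then dependents indented by 2 spaces per level.
```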
def init_test_examples_dependencies() -> Tuple[Dict[str, List[str]], List[str]]:
    """
    The test examples do not import from the examples (which are just scripts, not modules) so we need some extra
    care initializing the dependency map, which is the goal of this function. It initializes the dependency map for
    example files by linking each example to the example test file for the example framework.

    Returns:
        `Tuple[Dict[str, List[str]], List[str]]`: A tuple with two elements: the initialized dependency map, which is
        a dict mapping each test example file to the list of example files potentially tested by that test file, and
        the list of all example files (to avoid recomputing it later).
    """
    test_example_deps = {}
    all_examples = []
    for framework in ["flax", "pytorch", "tensorflow"]:
        test_files = list((PATH_TO_EXAMPLES / framework).glob("test_*.py"))
        all_examples.extend(test_files)
        # Remove the files at the root of examples/framework since they are not proper examples (they are either
        # utils or example test files).
        examples = [
            f for f in (PATH_TO_EXAMPLES / framework).glob("**/*.py") if f.parent != PATH_TO_EXAMPLES / framework
        ]
        all_examples.extend(examples)
        for test_file in test_files:
            with open(test_file, "r", encoding="utf-8") as f:
                content = f.read()
            # Map all examples to the test files found in examples/framework.
            test_example_deps[str(test_file.relative_to(PATH_TO_REPO))] = [
                str(e.relative_to(PATH_TO_REPO)) for e in examples if e.name in content
            ]
            # Also map the test files to themselves.
            test_example_deps[str(test_file.relative_to(PATH_TO_REPO))].append(
                str(test_file.relative_to(PATH_TO_REPO))
            )
    return test_example_deps, all_examples
The test examples do not import from the examples (which are just scripts, not modules) so we need some extra care initializing the dependency map, which is the goal of this function. It initializes the dependency map for example files by linking each example to the example test file for the example framework. Returns: `Tuple[Dict[str, List[str]], List[str]]`: A tuple with two elements: the initialized dependency map, which is a dict mapping each test example file to the list of example files potentially tested by that test file, and the list of all example files (to avoid recomputing it later).
init_test_examples_dependencies
python
huggingface/transformers
utils/tests_fetcher.py
https://github.com/huggingface/transformers/blob/master/utils/tests_fetcher.py
Apache-2.0
def create_reverse_dependency_map() -> Dict[str, List[str]]:
    """
    Create the dependency map from module/test filename to the list of modules/tests that depend on it recursively.

    Returns:
        `Dict[str, List[str]]`: The reverse dependency map as a dictionary mapping filenames to all the filenames
        depending on it recursively. This way the tests impacted by a change in file A are the test files in the list
        corresponding to key A in this result.
    """
    cache = {}
    # Start from the example deps init.
    example_deps, examples = init_test_examples_dependencies()
    # Add all modules and all tests to all examples
    all_modules = list(PATH_TO_TRANFORMERS.glob("**/*.py"))
    all_modules = [x for x in all_modules if not ("models" in x.parts and x.parts[-1].startswith("convert_"))]
    all_modules += list(PATH_TO_TESTS.glob("**/*.py")) + examples
    all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules]
    # Compute the direct dependencies of all modules.
    direct_deps = {m: get_module_dependencies(m, cache=cache) for m in all_modules}
    direct_deps.update(example_deps)

    # Recurse over the dependencies until reaching a fixed point.
    something_changed = True
    while something_changed:
        something_changed = False
        for m in all_modules:
            for d in direct_deps[m]:
                # We stop recursing at an init (because we always end up in the main init and we don't want to add
                # all the files which the main init imports)
                if d.endswith("__init__.py"):
                    continue
                if d not in direct_deps:
                    raise ValueError(f"KeyError: {d}. From {m}")
                new_deps = set(direct_deps[d]) - set(direct_deps[m])
                if len(new_deps) > 0:
                    direct_deps[m].extend(list(new_deps))
                    something_changed = True

    # Finally we can build the reverse map.
    reverse_map = collections.defaultdict(list)
    for m in all_modules:
        for d in direct_deps[m]:
            reverse_map[d].append(m)

    # For inits, we don't do the reverse deps but the direct deps: if modifying an init, we want to make sure we test
    # all the modules impacted by that init.
    for m in [f for f in all_modules if f.endswith("__init__.py")]:
        direct_deps = get_module_dependencies(m, cache=cache)
        deps = sum([reverse_map[d] for d in direct_deps if not d.endswith("__init__.py")], direct_deps)
        reverse_map[m] = list(set(deps) - {m})

    return reverse_map
Create the dependency map from module/test filename to the list of modules/tests that depend on it recursively. Returns: `Dict[str, List[str]]`: The reverse dependency map as a dictionary mapping filenames to all the filenames depending on it recursively. This way the tests impacted by a change in file A are the test files in the list corresponding to key A in this result.
create_reverse_dependency_map
python
huggingface/transformers
utils/tests_fetcher.py
https://github.com/huggingface/transformers/blob/master/utils/tests_fetcher.py
Apache-2.0
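A hedged sketch of how the map answers "which tests are impacted by file X?"; the key below is illustrative.

```python
from tests_fetcher import create_reverse_dependency_map

reverse_map = create_reverse_dependency_map()
dependents = reverse_map.get("src/transformers/configuration_utils.py", [])
impacted_tests = [f for f in dependents if f.startswith("tests")]
print(f"{len(impacted_tests)} test files would be impacted")
```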
def create_module_to_test_map(
    reverse_map: Optional[Dict[str, List[str]]] = None, filter_models: bool = False
) -> Dict[str, List[str]]:
    """
    Extract the tests from the reverse_dependency_map and potentially filter the model tests.

    Args:
        reverse_map (`Dict[str, List[str]]`, *optional*):
            The reverse dependency map as created by `create_reverse_dependency_map`. Will default to the result of
            that function if not provided.
        filter_models (`bool`, *optional*, defaults to `False`):
            Whether or not to filter model tests to only include core models if a file impacts a lot of models.

    Returns:
        `Dict[str, List[str]]`: A dictionary that maps each file to the tests to execute if that file was modified.
    """
    if reverse_map is None:
        reverse_map = create_reverse_dependency_map()

    # Utility that tells us if a given file is a test (taking test examples into account)
    def is_test(fname):
        if fname.startswith("tests"):
            return True
        if fname.startswith("examples") and fname.split(os.path.sep)[-1].startswith("test"):
            return True
        return False

    # Build the test map
    test_map = {module: [f for f in deps if is_test(f)] for module, deps in reverse_map.items()}

    if not filter_models:
        return test_map

    # Now we deal with the filtering if `filter_models` is True.
    num_model_tests = len(list(PATH_TO_TESTS.glob("models/*")))

    def has_many_models(tests):
        # We filter to core models when a given file impacts more than half the model tests.
        model_tests = {Path(t).parts[2] for t in tests if t.startswith("tests/models/")}
        return len(model_tests) > num_model_tests // 2

    # For each module of the form `models/my_model` (i.e. starting with it), we always keep the tests living in
    # `tests/models/my_model`. This avoids excluding the directly related test files when a module has many impacted
    # tests: they should always be included!
    def filter_tests(tests, module=""):
        filtered_tests = []
        for t in tests:
            if (
                not t.startswith("tests/models/")
                or Path(t).parts[2] in IMPORTANT_MODELS
                # at this point, `t` is of the form `tests/models/my_model`, and we check if `models/my_model`
                # (i.e. `parts[1:3]`) is in `module`.
                or "/".join(Path(t).parts[1:3]) in module
            ):
                filtered_tests += [t]
        return filtered_tests

    return {
        module: (filter_tests(tests, module=module) if has_many_models(tests) else tests)
        for module, tests in test_map.items()
    }
Extract the tests from the reverse_dependency_map and potentially filter the model tests. Args: reverse_map (`Dict[str, List[str]]`, *optional*): The reverse dependency map as created by `create_reverse_dependency_map`. Will default to the result of that function if not provided. filter_models (`bool`, *optional*, defaults to `False`): Whether or not to filter model tests to only include core models if a file impacts a lot of models. Returns: `Dict[str, List[str]]`: A dictionary that maps each file to the tests to execute if that file was modified.
create_module_to_test_map
python
huggingface/transformers
utils/tests_fetcher.py
https://github.com/huggingface/transformers/blob/master/utils/tests_fetcher.py
Apache-2.0
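Usage sketch (illustrative key; assumes the fetcher utilities are importable): with `filter_models=True`, files that touch more than half the model tests only map to core-model tests plus their directly related ones.

```python
from tests_fetcher import create_module_to_test_map

test_map = create_module_to_test_map(filter_models=True)
print(test_map.get("src/transformers/models/bert/modeling_bert.py", []))
```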
def filter_tests(output_file: str, filters: List[str]): """ Reads the content of the output file and filters out all the tests in a list of given folders. Args: output_file (`str` or `os.PathLike`): The path to the output file of the tests fetcher. filters (`List[str]`): A list of folders to filter. """ if not os.path.isfile(output_file): print("No test file found.") return with open(output_file, "r", encoding="utf-8") as f: test_files = f.read().split(" ") if len(test_files) == 0 or test_files == [""]: print("No tests to filter.") return if test_files == ["tests"]: test_files = [os.path.join("tests", f) for f in os.listdir("tests") if f not in ["__init__.py"] + filters] else: test_files = [f for f in test_files if f.split(os.path.sep)[1] not in filters] with open(output_file, "w", encoding="utf-8") as f: f.write(" ".join(test_files))
Reads the content of the output file and filters out all the tests in a list of given folders. Args: output_file (`str` or `os.PathLike`): The path to the output file of the tests fetcher. filters (`List[str]`): A list of folders to filter.
filter_tests
python
huggingface/transformers
utils/tests_fetcher.py
https://github.com/huggingface/transformers/blob/master/utils/tests_fetcher.py
Apache-2.0
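A minimal sketch, assuming `test_list.txt` is a fetcher output file containing space-separated test paths; the folder names to filter are illustrative.

```python
from tests_fetcher import filter_tests

# Drops every test whose second path component is in the filter list,
# e.g. tests/repo_utils/... and tests/onnx/...
filter_tests("test_list.txt", ["repo_utils", "onnx"])
```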
def camel_case_split(identifier: str) -> List[str]: """ Split a camel-cased name into words. Args: identifier (`str`): The camel-cased name to parse. Returns: `List[str]`: The list of words in the identifier (as separated by capital letters). Example: ```py >>> camel_case_split("CamelCasedClass") ["Camel", "Cased", "Class"] ``` """ # Regex thanks to https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier) return [m.group(0) for m in matches]
Split a camel-cased name into words. Args: identifier (`str`): The camel-cased name to parse. Returns: `List[str]`: The list of words in the identifier (as separated by capital letters). Example: ```py >>> camel_case_split("CamelCasedClass") ["Camel", "Cased", "Class"] ```
camel_case_split
python
huggingface/transformers
utils/update_metadata.py
https://github.com/huggingface/transformers/blob/master/utils/update_metadata.py
Apache-2.0
def get_frameworks_table() -> pd.DataFrame:
    """
    Generates a dataframe containing the supported auto classes for each model type, using the content of the auto
    modules.
    """
    # Dictionary model names to config.
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_mapping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's look through all transformers objects (once) and find out whether models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's find the right processing class for each model. In order we check if there is a Processor, then a
    # Tokenizer, then a FeatureExtractor, then an ImageProcessor
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.image_processing_auto.IMAGE_PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoImageProcessor"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
Generates a dataframe containing the supported auto classes for each model type, using the content of the auto modules.
get_frameworks_table
python
huggingface/transformers
utils/update_metadata.py
https://github.com/huggingface/transformers/blob/master/utils/update_metadata.py
Apache-2.0
def update_pipeline_and_auto_class_table(table: Dict[str, Tuple[str, str]]) -> Dict[str, Tuple[str, str]]: """ Update the table mapping models to pipelines and auto classes without removing old keys if they don't exist anymore. Args: table (`Dict[str, Tuple[str, str]]`): The existing table mapping model names to a tuple containing the pipeline tag and the auto-class name with which they should be used. Returns: `Dict[str, Tuple[str, str]]`: The updated table in the same format. """ auto_modules = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"] auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"] # Loop through all three frameworks for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings): # The type of pipeline may not exist in this framework if not hasattr(module, mapping): continue # First extract all model_names model_names = [] for name in getattr(module, mapping).values(): if isinstance(name, str): model_names.append(name) else: model_names.extend(list(name)) # Add pipeline tag and auto model class for those models table.update(dict.fromkeys(model_names, (pipeline_tag, cls))) return table
Update the table mapping models to pipelines and auto classes without removing old keys if they don't exist anymore. Args: table (`Dict[str, Tuple[str, str]]`): The existing table mapping model names to a tuple containing the pipeline tag and the auto-class name with which they should be used. Returns: `Dict[str, Tuple[str, str]]`: The updated table in the same format.
update_pipeline_and_auto_class_table
python
huggingface/transformers
utils/update_metadata.py
https://github.com/huggingface/transformers/blob/master/utils/update_metadata.py
Apache-2.0
def check_pipeline_tags(): """ Check all pipeline tags are properly defined in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant of this script. """ in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS missing = [] for key in pipeline_tasks: if key not in in_table: model = pipeline_tasks[key]["pt"] if isinstance(model, (list, tuple)): model = model[0] model = model.__name__ if model not in in_table.values(): missing.append(key) if len(missing) > 0: msg = ", ".join(missing) raise ValueError( "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside " f"`utils/update_metadata.py`: {msg}. Please add them!" )
Check all pipeline tags are properly defined in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant of this script.
check_pipeline_tags
python
huggingface/transformers
utils/update_metadata.py
https://github.com/huggingface/transformers/blob/master/utils/update_metadata.py
Apache-2.0
async def lifespan(app: FastAPI): """ An asynchronous context manager for managing the lifecycle of the FastAPI app. It ensures that GPU memory is cleared after the app's lifecycle ends, which is essential for efficient resource management in GPU environments. """ yield if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.ipc_collect()
An asynchronous context manager for managing the lifecycle of the FastAPI app. It ensures that GPU memory is cleared after the app's lifecycle ends, which is essential for efficient resource management in GPU environments.
lifespan
python
THUDM/CogVLM2
basic_demo/openai_api_demo.py
https://github.com/THUDM/CogVLM2/blob/master/basic_demo/openai_api_demo.py
Apache-2.0
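Wiring sketch: FastAPI expects the `lifespan` argument to be an async context manager factory, so the generator above is assumed to carry (or be wrapped by) `asynccontextmanager`; everything after the `yield` runs at shutdown.

```python
from contextlib import asynccontextmanager

from fastapi import FastAPI

# Wrap explicitly in case the decorator was not applied at definition time.
app = FastAPI(lifespan=asynccontextmanager(lifespan))
```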
async def list_models(): """ An endpoint to list available models. It returns a list of model cards. This is useful for clients to query and understand what models are available for use. """ model_card = ModelCard(id="cogvlm2-19b") return ModelList(data=[model_card])
An endpoint to list available models. It returns a list of model cards. This is useful for clients to query and understand what models are available for use.
list_models
python
THUDM/CogVLM2
basic_demo/openai_api_demo.py
https://github.com/THUDM/CogVLM2/blob/master/basic_demo/openai_api_demo.py
Apache-2.0
def generate_cogvlm(model: AutoModelForCausalLM, tokenizer: AutoTokenizer, params: dict): """ Generates a response using the CogVLM2 model. It processes the chat history and image data, if any, and then invokes the model to generate a response. """ response = None for response in generate_stream_cogvlm(model, tokenizer, params): pass return response
Generates a response using the CogVLM2 model. It processes the chat history and image data, if any, and then invokes the model to generate a response.
generate_cogvlm
python
THUDM/CogVLM2
basic_demo/openai_api_demo.py
https://github.com/THUDM/CogVLM2/blob/master/basic_demo/openai_api_demo.py
Apache-2.0
def unboxn(vin, n): """vin = (batch, h, w, depth), returns vout = (batch, n*h, n*w, depth), each pixel is duplicated.""" s = tf.shape(vin) vout = tf.concat([vin] * (n ** 2), 0) # Poor man's replacement for tf.tile (required for Adversarial Training support). vout = tf.reshape(vout, [s[0] * (n ** 2), s[1], s[2], s[3]]) vout = tf.batch_to_space(vout, [[0, 0], [0, 0]], n) return vout
vin = (batch, h, w, depth), returns vout = (batch, n*h, n*w, depth), each pixel is duplicated.
unboxn
python
carpedm20/BEGAN-tensorflow
layers.py
https://github.com/carpedm20/BEGAN-tensorflow/blob/master/layers.py
Apache-2.0
def boxn(vin, n): """vin = (batch, h, w, depth), returns vout = (batch, h//n, w//n, depth), each pixel is averaged.""" if n == 1: return vin s = tf.shape(vin) vout = tf.reshape(vin, [s[0], s[1] // n, n, s[2] // n, n, s[3]]) vout = tf.reduce_mean(vout, [2, 4]) return vout
vin = (batch, h, w, depth), returns vout = (batch, h//n, w//n, depth), each pixel is averaged.
boxn
python
carpedm20/BEGAN-tensorflow
layers.py
https://github.com/carpedm20/BEGAN-tensorflow/blob/master/layers.py
Apache-2.0
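The same reshape-and-mean trick in plain NumPy (a sketch, assuming `h` and `w` are divisible by `n`), which makes the averaging easy to verify eagerly:

```python
import numpy as np

x = np.arange(2 * 4 * 4 * 3, dtype=np.float32).reshape(2, 4, 4, 3)
n = 2
# Split each spatial axis into (size // n, n) and average over the two n-axes.
pooled = x.reshape(2, 4 // n, n, 4 // n, n, 3).mean(axis=(2, 4))
print(pooled.shape)  # (2, 2, 2, 3): every output pixel is an n*n box average
```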
def make_grid(tensor, nrow=8, padding=2, normalize=False, scale_each=False): """Code based on https://github.com/pytorch/vision/blob/master/torchvision/utils.py""" nmaps = tensor.shape[0] xmaps = min(nrow, nmaps) ymaps = int(math.ceil(float(nmaps) / xmaps)) height, width = int(tensor.shape[1] + padding), int(tensor.shape[2] + padding) grid = np.zeros([height * ymaps + 1 + padding // 2, width * xmaps + 1 + padding // 2, 3], dtype=np.uint8) k = 0 for y in range(ymaps): for x in range(xmaps): if k >= nmaps: break h, h_width = y * height + 1 + padding // 2, height - padding w, w_width = x * width + 1 + padding // 2, width - padding grid[h:h+h_width, w:w+w_width] = tensor[k] k = k + 1 return grid
Code based on https://github.com/pytorch/vision/blob/master/torchvision/utils.py
make_grid
python
carpedm20/BEGAN-tensorflow
utils.py
https://github.com/carpedm20/BEGAN-tensorflow/blob/master/utils.py
Apache-2.0
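Usage sketch with random data (the output path is illustrative): the function expects a batch of HWC uint8 images and returns a single uint8 grid image.

```python
import numpy as np
from PIL import Image  # any image writer works; PIL is assumed here

images = np.random.randint(0, 256, size=(16, 64, 64, 3), dtype=np.uint8)
grid = make_grid(images, nrow=4)  # 4 columns -> a 4x4 grid with 2px padding
Image.fromarray(grid).save("grid.png")
```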
def index(): """Searches the database for entries, then displays them.""" entries = db.session.query(models.Post) return render_template("index.html", entries=entries)
Searches the database for entries, then displays them.
index
python
mjhea0/flaskr-tdd
project/app.py
https://github.com/mjhea0/flaskr-tdd/blob/master/project/app.py
MIT
def add_entry(): """Adds new post to the database.""" if not session.get("logged_in"): abort(401) new_entry = models.Post(request.form["title"], request.form["text"]) db.session.add(new_entry) db.session.commit() flash("New entry was successfully posted") return redirect(url_for("index"))
Adds new post to the database.
add_entry
python
mjhea0/flaskr-tdd
project/app.py
https://github.com/mjhea0/flaskr-tdd/blob/master/project/app.py
MIT
def test_login_logout(client): """Test login and logout using helper functions""" rv = login(client, app.config["USERNAME"], app.config["PASSWORD"]) assert b"You were logged in" in rv.data rv = logout(client) assert b"You were logged out" in rv.data rv = login(client, app.config["USERNAME"] + "x", app.config["PASSWORD"]) assert b"Invalid username" in rv.data rv = login(client, app.config["USERNAME"], app.config["PASSWORD"] + "x") assert b"Invalid password" in rv.data
Test login and logout using helper functions
test_login_logout
python
mjhea0/flaskr-tdd
tests/app_test.py
https://github.com/mjhea0/flaskr-tdd/blob/master/tests/app_test.py
MIT
def test_messages(client): """Ensure that user can post messages""" login(client, app.config["USERNAME"], app.config["PASSWORD"]) rv = client.post( "/add", data=dict(title="<Hello>", text="<strong>HTML</strong> allowed here"), follow_redirects=True, ) assert b"No entries here so far" not in rv.data assert b"&lt;Hello&gt;" in rv.data assert b"<strong>HTML</strong> allowed here" in rv.data
Ensure that user can post messages
test_messages
python
mjhea0/flaskr-tdd
tests/app_test.py
https://github.com/mjhea0/flaskr-tdd/blob/master/tests/app_test.py
MIT
def test_delete_message(client): """Ensure the messages are being deleted""" rv = client.get("/delete/1") data = json.loads(rv.data) assert data["status"] == 0 login(client, app.config["USERNAME"], app.config["PASSWORD"]) rv = client.get("/delete/1") data = json.loads(rv.data) assert data["status"] == 1
Ensure the messages are being deleted
test_delete_message
python
mjhea0/flaskr-tdd
tests/app_test.py
https://github.com/mjhea0/flaskr-tdd/blob/master/tests/app_test.py
MIT
def unique_config_sections(config_file):
    """Convert all config sections to have unique names.

    Adds unique suffixes to config sections for compatibility with configparser.
    """
    section_counters = defaultdict(int)
    output_stream = io.StringIO()
    with open(config_file) as fin:
        for line in fin:
            if line.startswith('['):
                section = line.strip().strip('[]')
                _section = section + '_' + str(section_counters[section])
                section_counters[section] += 1
                line = line.replace(section, _section)
            output_stream.write(line)
    output_stream.seek(0)
    return output_stream
Convert all config sections to have unique names. Adds unique suffixes to config sections for compatibility with configparser.
unique_config_sections
python
qqwweee/keras-yolo3
convert.py
https://github.com/qqwweee/keras-yolo3/blob/master/convert.py
MIT
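Usage sketch (the .cfg path is illustrative): the returned stream plugs straight into `configparser`, which would otherwise reject Darknet configs because section names repeat.

```python
import configparser

stream = unique_config_sections("yolov3.cfg")
cfg_parser = configparser.ConfigParser()
cfg_parser.read_file(stream)
print(cfg_parser.sections())  # e.g. ['net_0', 'convolutional_0', 'convolutional_1', ...]
```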
def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
            weights_path='model_data/tiny_yolo_weights.h5'):
    '''Create the training model for Tiny YOLOv3.'''
    K.clear_session() # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)

    y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \
        num_anchors//2, num_classes+5)) for l in range(2)]

    model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
    print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze the darknet body or freeze all but 2 output layers.
            num = (20, len(model_body.layers)-2)[freeze_body-1]
            for i in range(num): model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))

    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
        [*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)

    return model
Create the training model for Tiny YOLOv3.
create_tiny_model
python
qqwweee/keras-yolo3
train.py
https://github.com/qqwweee/keras-yolo3/blob/master/train.py
MIT
def DarknetConv2D(*args, **kwargs): """Wrapper to set Darknet parameters for Convolution2D.""" darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)} darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same' darknet_conv_kwargs.update(kwargs) return Conv2D(*args, **darknet_conv_kwargs)
Wrapper to set Darknet parameters for Convolution2D.
DarknetConv2D
python
qqwweee/keras-yolo3
yolo3/model.py
https://github.com/qqwweee/keras-yolo3/blob/master/yolo3/model.py
MIT
def DarknetConv2D_BN_Leaky(*args, **kwargs): """Darknet Convolution2D followed by BatchNormalization and LeakyReLU.""" no_bias_kwargs = {'use_bias': False} no_bias_kwargs.update(kwargs) return compose( DarknetConv2D(*args, **no_bias_kwargs), BatchNormalization(), LeakyReLU(alpha=0.1))
Darknet Convolution2D followed by BatchNormalization and LeakyReLU.
DarknetConv2D_BN_Leaky
python
qqwweee/keras-yolo3
yolo3/model.py
https://github.com/qqwweee/keras-yolo3/blob/master/yolo3/model.py
MIT
def resblock_body(x, num_filters, num_blocks): '''A series of resblocks starting with a downsampling Convolution2D''' # Darknet uses left and top padding instead of 'same' mode x = ZeroPadding2D(((1,0),(1,0)))(x) x = DarknetConv2D_BN_Leaky(num_filters, (3,3), strides=(2,2))(x) for i in range(num_blocks): y = compose( DarknetConv2D_BN_Leaky(num_filters//2, (1,1)), DarknetConv2D_BN_Leaky(num_filters, (3,3)))(x) x = Add()([x,y]) return x
A series of resblocks starting with a downsampling Convolution2D
resblock_body
python
qqwweee/keras-yolo3
yolo3/model.py
https://github.com/qqwweee/keras-yolo3/blob/master/yolo3/model.py
MIT
def yolo_body(inputs, num_anchors, num_classes): """Create YOLO_V3 model CNN body in Keras.""" darknet = Model(inputs, darknet_body(inputs)) x, y1 = make_last_layers(darknet.output, 512, num_anchors*(num_classes+5)) x = compose( DarknetConv2D_BN_Leaky(256, (1,1)), UpSampling2D(2))(x) x = Concatenate()([x,darknet.layers[152].output]) x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5)) x = compose( DarknetConv2D_BN_Leaky(128, (1,1)), UpSampling2D(2))(x) x = Concatenate()([x,darknet.layers[92].output]) x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5)) return Model(inputs, [y1,y2,y3])
Create YOLO_V3 model CNN body in Keras.
yolo_body
python
qqwweee/keras-yolo3
yolo3/model.py
https://github.com/qqwweee/keras-yolo3/blob/master/yolo3/model.py
MIT
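Construction sketch under COCO-style settings (3 anchors per scale, 80 classes); the input size is illustrative and the import assumes the standalone Keras this repo targets.

```python
from keras.layers import Input

inputs = Input(shape=(416, 416, 3))
model = yolo_body(inputs, num_anchors=3, num_classes=80)
# Three output scales; for a 416x416 input the grids are 13, 26 and 52,
# each with depth 3 * (80 + 5) = 255.
print([output.shape for output in model.outputs])
```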
def tiny_yolo_body(inputs, num_anchors, num_classes): '''Create Tiny YOLO_v3 model CNN body in keras.''' x1 = compose( DarknetConv2D_BN_Leaky(16, (3,3)), MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'), DarknetConv2D_BN_Leaky(32, (3,3)), MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'), DarknetConv2D_BN_Leaky(64, (3,3)), MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'), DarknetConv2D_BN_Leaky(128, (3,3)), MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'), DarknetConv2D_BN_Leaky(256, (3,3)))(inputs) x2 = compose( MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'), DarknetConv2D_BN_Leaky(512, (3,3)), MaxPooling2D(pool_size=(2,2), strides=(1,1), padding='same'), DarknetConv2D_BN_Leaky(1024, (3,3)), DarknetConv2D_BN_Leaky(256, (1,1)))(x1) y1 = compose( DarknetConv2D_BN_Leaky(512, (3,3)), DarknetConv2D(num_anchors*(num_classes+5), (1,1)))(x2) x2 = compose( DarknetConv2D_BN_Leaky(128, (1,1)), UpSampling2D(2))(x2) y2 = compose( Concatenate(), DarknetConv2D_BN_Leaky(256, (3,3)), DarknetConv2D(num_anchors*(num_classes+5), (1,1)))([x2,x1]) return Model(inputs, [y1,y2])
Create Tiny YOLO_v3 model CNN body in keras.
tiny_yolo_body
python
qqwweee/keras-yolo3
yolo3/model.py
https://github.com/qqwweee/keras-yolo3/blob/master/yolo3/model.py
MIT
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs
Convert final layer features to bounding box parameters.
yolo_head
python
qqwweee/keras-yolo3
yolo3/model.py
https://github.com/qqwweee/keras-yolo3/blob/master/yolo3/model.py
MIT
def yolo_eval(yolo_outputs, anchors, num_classes, image_shape, max_boxes=20, score_threshold=.6, iou_threshold=.5): """Evaluate YOLO model on given input and return filtered boxes.""" num_layers = len(yolo_outputs) anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]] # default setting input_shape = K.shape(yolo_outputs[0])[1:3] * 32 boxes = [] box_scores = [] for l in range(num_layers): _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l], anchors[anchor_mask[l]], num_classes, input_shape, image_shape) boxes.append(_boxes) box_scores.append(_box_scores) boxes = K.concatenate(boxes, axis=0) box_scores = K.concatenate(box_scores, axis=0) mask = box_scores >= score_threshold max_boxes_tensor = K.constant(max_boxes, dtype='int32') boxes_ = [] scores_ = [] classes_ = [] for c in range(num_classes): # TODO: use keras backend instead of tf. class_boxes = tf.boolean_mask(boxes, mask[:, c]) class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c]) nms_index = tf.image.non_max_suppression( class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold) class_boxes = K.gather(class_boxes, nms_index) class_box_scores = K.gather(class_box_scores, nms_index) classes = K.ones_like(class_box_scores, 'int32') * c boxes_.append(class_boxes) scores_.append(class_box_scores) classes_.append(classes) boxes_ = K.concatenate(boxes_, axis=0) scores_ = K.concatenate(scores_, axis=0) classes_ = K.concatenate(classes_, axis=0) return boxes_, scores_, classes_
Evaluate YOLO model on given input and return filtered boxes.
yolo_eval
python
qqwweee/keras-yolo3
yolo3/model.py
https://github.com/qqwweee/keras-yolo3/blob/master/yolo3/model.py
MIT
def preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):
    '''Preprocess true boxes to training input format

    Parameters
    ----------
    true_boxes: array, shape=(m, T, 5)
        Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.
    input_shape: array-like, hw, multiples of 32
    anchors: array, shape=(N, 2), wh
    num_classes: integer

    Returns
    -------
    y_true: list of array, shape like yolo_outputs, xywh are relative values

    '''
    assert (true_boxes[..., 4]<num_classes).all(), 'class id must be less than num_classes'
    num_layers = len(anchors)//3 # default setting
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]

    true_boxes = np.array(true_boxes, dtype='float32')
    input_shape = np.array(input_shape, dtype='int32')
    boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
    boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
    true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]
    true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]

    m = true_boxes.shape[0]
    grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)]
    y_true = [np.zeros((m,grid_shapes[l][0],grid_shapes[l][1],len(anchor_mask[l]),5+num_classes),
        dtype='float32') for l in range(num_layers)]

    # Expand dim to apply broadcasting.
    anchors = np.expand_dims(anchors, 0)
    anchor_maxes = anchors / 2.
    anchor_mins = -anchor_maxes
    valid_mask = boxes_wh[..., 0]>0

    for b in range(m):
        # Discard zero rows.
        wh = boxes_wh[b, valid_mask[b]]
        if len(wh)==0: continue
        # Expand dim to apply broadcasting.
        wh = np.expand_dims(wh, -2)
        box_maxes = wh / 2.
        box_mins = -box_maxes

        intersect_mins = np.maximum(box_mins, anchor_mins)
        intersect_maxes = np.minimum(box_maxes, anchor_maxes)
        intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
        intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
        box_area = wh[..., 0] * wh[..., 1]
        anchor_area = anchors[..., 0] * anchors[..., 1]
        iou = intersect_area / (box_area + anchor_area - intersect_area)

        # Find best anchor for each true box
        best_anchor = np.argmax(iou, axis=-1)

        for t, n in enumerate(best_anchor):
            for l in range(num_layers):
                if n in anchor_mask[l]:
                    i = np.floor(true_boxes[b,t,0]*grid_shapes[l][1]).astype('int32')
                    j = np.floor(true_boxes[b,t,1]*grid_shapes[l][0]).astype('int32')
                    k = anchor_mask[l].index(n)
                    c = true_boxes[b,t, 4].astype('int32')
                    y_true[l][b, j, i, k, 0:4] = true_boxes[b,t, 0:4]
                    y_true[l][b, j, i, k, 4] = 1
                    y_true[l][b, j, i, k, 5+c] = 1

    return y_true
Preprocess true boxes to training input format Parameters ---------- true_boxes: array, shape=(m, T, 5) Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape. input_shape: array-like, hw, multiples of 32 anchors: array, shape=(N, 2), wh num_classes: integer Returns ------- y_true: list of array, shape like yolo_outputs, xywh are relative values
preprocess_true_boxes
python
qqwweee/keras-yolo3
yolo3/model.py
https://github.com/qqwweee/keras-yolo3/blob/master/yolo3/model.py
MIT
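A sketch with dummy data: one image, one box, and the nine default YOLOv3 anchors (the box coordinates are illustrative).

```python
import numpy as np

anchors = np.array([[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
                    [59, 119], [116, 90], [156, 198], [373, 326]])
# Shape (m=1, T=1, 5): absolute x_min, y_min, x_max, y_max, class_id.
true_boxes = np.array([[[100, 120, 200, 260, 0]]], dtype="float32")
y_true = preprocess_true_boxes(true_boxes, (416, 416), anchors, num_classes=80)
print([y.shape for y in y_true])
# [(1, 13, 13, 3, 85), (1, 26, 26, 3, 85), (1, 52, 52, 3, 85)]
```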
def box_iou(b1, b2): '''Return iou tensor Parameters ---------- b1: tensor, shape=(i1,...,iN, 4), xywh b2: tensor, shape=(j, 4), xywh Returns ------- iou: tensor, shape=(i1,...,iN, j) ''' # Expand dim to apply broadcasting. b1 = K.expand_dims(b1, -2) b1_xy = b1[..., :2] b1_wh = b1[..., 2:4] b1_wh_half = b1_wh/2. b1_mins = b1_xy - b1_wh_half b1_maxes = b1_xy + b1_wh_half # Expand dim to apply broadcasting. b2 = K.expand_dims(b2, 0) b2_xy = b2[..., :2] b2_wh = b2[..., 2:4] b2_wh_half = b2_wh/2. b2_mins = b2_xy - b2_wh_half b2_maxes = b2_xy + b2_wh_half intersect_mins = K.maximum(b1_mins, b2_mins) intersect_maxes = K.minimum(b1_maxes, b2_maxes) intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.) intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1] b1_area = b1_wh[..., 0] * b1_wh[..., 1] b2_area = b2_wh[..., 0] * b2_wh[..., 1] iou = intersect_area / (b1_area + b2_area - intersect_area) return iou
Return iou tensor Parameters ---------- b1: tensor, shape=(i1,...,iN, 4), xywh b2: tensor, shape=(j, 4), xywh Returns ------- iou: tensor, shape=(i1,...,iN, j)
box_iou
python
qqwweee/keras-yolo3
yolo3/model.py
https://github.com/qqwweee/keras-yolo3/blob/master/yolo3/model.py
MIT
def yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False):
    '''Return yolo_loss tensor

    Parameters
    ----------
    yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body
    y_true: list of array, the output of preprocess_true_boxes
    anchors: array, shape=(N, 2), wh
    num_classes: integer
    ignore_thresh: float, the IoU threshold used to decide whether to ignore the object confidence loss

    Returns
    -------
    loss: tensor, shape=(1,)

    '''
    num_layers = len(anchors)//3 # default setting
    yolo_outputs = args[:num_layers]
    y_true = args[num_layers:]
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]
    input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))
    grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)]
    loss = 0
    m = K.shape(yolo_outputs[0])[0] # batch size, tensor
    mf = K.cast(m, K.dtype(yolo_outputs[0]))

    for l in range(num_layers):
        object_mask = y_true[l][..., 4:5]
        true_class_probs = y_true[l][..., 5:]

        grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],
             anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True)
        pred_box = K.concatenate([pred_xy, pred_wh])

        # Darknet raw box to calculate loss.
        raw_true_xy = y_true[l][..., :2]*grid_shapes[l][::-1] - grid
        raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])
        raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf
        box_loss_scale = 2 - y_true[l][...,2:3]*y_true[l][...,3:4]

        # Find ignore mask, iterate over each of batch.
        ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
        object_mask_bool = K.cast(object_mask, 'bool')
        def loop_body(b, ignore_mask):
            true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0])
            iou = box_iou(pred_box[b], true_box)
            best_iou = K.max(iou, axis=-1)
            ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box)))
            return b+1, ignore_mask
        _, ignore_mask = K.control_flow_ops.while_loop(lambda b,*args: b<m, loop_body, [0, ignore_mask])
        ignore_mask = ignore_mask.stack()
        ignore_mask = K.expand_dims(ignore_mask, -1)

        # K.binary_crossentropy is helpful to avoid exp overflow.
        xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[...,0:2], from_logits=True)
        wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh-raw_pred[...,2:4])
        confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True)+ \
            (1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True) * ignore_mask
        class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[...,5:], from_logits=True)

        xy_loss = K.sum(xy_loss) / mf
        wh_loss = K.sum(wh_loss) / mf
        confidence_loss = K.sum(confidence_loss) / mf
        class_loss = K.sum(class_loss) / mf
        loss += xy_loss + wh_loss + confidence_loss + class_loss
        if print_loss:
            loss = tf.Print(loss, [loss, xy_loss, wh_loss, confidence_loss, class_loss, K.sum(ignore_mask)], message='loss: ')
    return loss
Return yolo_loss tensor Parameters ---------- yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body y_true: list of array, the output of preprocess_true_boxes anchors: array, shape=(N, 2), wh num_classes: integer ignore_thresh: float, the IoU threshold used to decide whether to ignore the object confidence loss Returns ------- loss: tensor, shape=(1,)
yolo_loss
python
qqwweee/keras-yolo3
yolo3/model.py
https://github.com/qqwweee/keras-yolo3/blob/master/yolo3/model.py
MIT
def compose(*funcs): """Compose arbitrarily many functions, evaluated left to right. Reference: https://mathieularose.com/function-composition-in-python/ """ # return lambda x: reduce(lambda v, f: f(v), funcs, x) if funcs: return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs) else: raise ValueError('Composition of empty sequence not supported.')
Compose arbitrarily many functions, evaluated left to right. Reference: https://mathieularose.com/function-composition-in-python/
compose
python
qqwweee/keras-yolo3
yolo3/utils.py
https://github.com/qqwweee/keras-yolo3/blob/master/yolo3/utils.py
MIT
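A minimal check of the left-to-right semantics (sketch, assuming `compose` is in scope):

```python
def add_one(x):
    return x + 1

def double(x):
    return x * 2

# Left to right: add_one runs first, then double.
print(compose(add_one, double)(3))  # (3 + 1) * 2 == 8
```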
def letterbox_image(image, size): '''resize image with unchanged aspect ratio using padding''' iw, ih = image.size w, h = size scale = min(w/iw, h/ih) nw = int(iw*scale) nh = int(ih*scale) image = image.resize((nw,nh), Image.BICUBIC) new_image = Image.new('RGB', size, (128,128,128)) new_image.paste(image, ((w-nw)//2, (h-nh)//2)) return new_image
resize image with unchanged aspect ratio using padding
letterbox_image
python
qqwweee/keras-yolo3
yolo3/utils.py
https://github.com/qqwweee/keras-yolo3/blob/master/yolo3/utils.py
MIT
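Usage sketch (the image path is illustrative): a non-square photo is scaled to fit and centered on a gray canvas, which is how YOLO inputs are typically prepared.

```python
from PIL import Image

image = Image.open("photo.jpg")
boxed = letterbox_image(image, (416, 416))
print(boxed.size)  # (416, 416); aspect ratio preserved via (128, 128, 128) padding
```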