| inputs (string, lengths 312 to 52k) | targets (string, lengths 1 to 3.1k, may be null) | block_type (11 classes) | scenario (7 classes) |
---|---|---|---|
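Each row below is one fill-in-the-middle (FIM) sample over the same source file, `tanuki_py/src/tanuki/validator.py`: the `inputs` column carries the code wrapped in `<fim_prefix>`, `<fim_suffix>`, and `<fim_middle>` sentinels, `targets` holds the expected completion, `block_type` names the kind of block being completed (here `CATCH`), and `scenario` describes how the prompt was constructed. As a minimal sketch, assuming each sentinel occurs exactly once per row (the helper name below is illustrative, not part of any published loader), a row's `inputs` string can be split back into its three parts:

```python
FIM_PREFIX = "<fim_prefix>"
FIM_SUFFIX = "<fim_suffix>"
FIM_MIDDLE = "<fim_middle>"


def split_fim_example(inputs: str) -> dict:
    """Split one FIM-formatted 'inputs' string into prefix, suffix, and middle."""
    body = inputs.split(FIM_PREFIX, 1)[-1]       # drop the <filename>... header, if present
    prefix, rest = body.split(FIM_SUFFIX, 1)     # code before the hole
    suffix, middle = rest.split(FIM_MIDDLE, 1)   # code after the hole, then the completion
    return {"prefix": prefix, "suffix": suffix, "middle": middle}

# The original file is then prefix + middle + suffix.
```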
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except<fim_suffix>
<fim_middle> (ValueError, TypeError):
pass
|
(ValueError, TypeError):
pass
|
CATCH
|
prefix_full_suffix_func_empty_complete_current_block_no_evidence
|
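For context on what the completed `except (ValueError, TypeError)` block belongs to, here is a hedged usage sketch of the `Validator` class shown in the row above. The import path is only inferred from the `<filename>` header, and the `Person` model is illustrative rather than part of the dataset.

```python
from typing import List, Optional
from pydantic import BaseModel

from tanuki.validator import Validator  # assumed path, inferred from the <filename> header

class Person(BaseModel):
    name: str
    age: int
    nickname: Optional[str] = None  # optional, so not a required field for check_type

validator = Validator()
assert validator.validate_output('{"name": "Ada", "age": 36}', Person)
assert not validator.validate_output('{"name": "Ada"}', Person)   # required "age" is missing
assert validator.check_type([1, 2, 3], List[int])
ada = validator.instantiate({"name": "Ada", "age": 36}, Person)   # returns a Person instance
assert ada.age == 36
```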
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except<fim_suffix>
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return int(float(data))
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle set-like dict types like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, set)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle> Exception as e:
print(e)
return False
|
Exception as e:
print(e)
return False
|
CATCH
|
prefix_full_suffix_func_empty_complete_current_block_no_evidence
|
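The completion above closes the pydantic branch of `check_type`; the detail worth calling out is how that branch decides which annotated fields are required. Below is a small self-contained sketch of exactly that `Optional` test (the `is_optional` helper name is illustrative):

```python
import typing
from typing import Optional, Union


def is_optional(annotation) -> bool:
    """A field counts as optional when its annotation is a Union that includes NoneType."""
    return typing.get_origin(annotation) is Union and type(None) in typing.get_args(annotation)


assert is_optional(Optional[str])        # Optional[str] is Union[str, None]
assert is_optional(Union[int, None])
assert not is_optional(int)
assert not is_optional(Union[int, str])  # a Union without None is still required
```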
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return int(float(data))
except<fim_suffix>
<fim_middle> (ValueError, TypeError):
pass
|
(ValueError, TypeError):
pass
|
CATCH
|
prefix_full_suffix_func_empty_complete_current_block_no_evidence
|
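The row above completes the `int` fallback inside `instantiate`'s base-type branch. A hedged usage sketch of that branch follows; the import path is inferred from the `<filename>` header. (Note that the neighbouring `float` branch in the source also returns `int(float(data))`, which would truncate fractional strings, so the sketch only exercises the paths shown above.)

```python
from tanuki.validator import Validator  # assumed path, inferred from the <filename> header

validator = Validator()
assert validator.instantiate("42", int) == 42
assert validator.instantiate("3.0", int) == 3        # falls back to int(float("3.0"))
assert validator.instantiate("abc", str) == "abc"    # strings pass through when str is requested
assert validator.instantiate(None, int) is None      # None is returned unchanged
```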
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return int(float(data))
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except<fim_suffix>
<fim_middle> AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
|
AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
|
CATCH
|
prefix_full_suffix_func_empty_complete_current_block_no_evidence
|
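The completion above is the pydantic-version fallback: `model_validate` exists only in pydantic v2, so catching `AttributeError` and calling `parse_obj` keeps pydantic v1 installations working. A self-contained sketch of that pattern (the `Point` model is illustrative):

```python
from pydantic import BaseModel


class Point(BaseModel):
    x: int
    y: int


payload = {"x": 1, "y": 2}
try:
    point = Point.model_validate(payload)   # pydantic >= 2
except AttributeError:
    point = Point.parse_obj(payload)        # pydantic < 2
assert point.x == 1 and point.y == 2
```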
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except<fim_suffix>
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
                # required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
                # this is an additional check, because the loop above only covers required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
                        return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
            # Handle dict subclasses such as OrderedDict
            # the first check needs to be done to ensure origin has the __mro__ attribute
            elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
                # e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
                if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle> json.JSONDecodeError:
return False
|
json.JSONDecodeError:
return False
|
CATCH
|
prefix_full_suffix_func_empty_complete_current_block_no_evidence
|
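For readers skimming the row above: the completion it records fills in the except json.JSONDecodeError branch of Validator.validate_output, which parses the raw model output before any type checking happens. A minimal, self-contained sketch of that parse-then-validate pattern follows; it is illustrative only, and is_list_of_ints stands in for the validator's real check_type call.

import json
from typing import Any

def is_list_of_ints(value: Any) -> bool:
    # Stand-in for Validator.check_type(value, List[int]).
    return isinstance(value, list) and all(isinstance(v, int) for v in value)

def validate_output(output: str) -> bool:
    # Malformed JSON simply fails validation rather than raising.
    try:
        deserialized = json.loads(output)
    except json.JSONDecodeError:
        return False
    # Only a successfully parsed value is checked against the expected type.
    return is_list_of_ints(deserialized)

print(validate_output("[1, 2, 3]"))   # True
print(validate_output("not json"))    # False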
<filename>tanuki_py/src/tanuki/language_models/openai_api.py<fim_prefix>from typing import List
import logging
import time
# import abstract base class
from openai import OpenAI
from openai.types import CreateEmbeddingResponse
from openai.types.fine_tuning import FineTuningJob
from tanuki.language_models.llm_finetune_api_abc import LLM_Finetune_API
from tanuki.models.embedding import Embedding
from tanuki.language_models.embedding_api_abc import Embedding_API
from tanuki.language_models.llm_api_abc import LLM_API
import os
from tanuki.constants import DEFAULT_DISTILLED_MODEL_NAME
from tanuki.language_models.llm_configs.openai_config import OpenAIConfig
from tanuki.models.finetune_job import FinetuneJob
import copy
OPENAI_URL = "https://api.openai.com/v1/chat/completions"
import requests
LLM_GENERATION_PARAMETERS = ["temperature", "top_p", "max_new_tokens", "frequency_penalty", "presence_penalty"]
class OpenAI_API(LLM_API, Embedding_API, LLM_Finetune_API):
def __init__(self) -> None:
# initialise the abstract base class
super().__init__()
self.api_key = os.environ.get("OPENAI_API_KEY")
self.client = None
def embed(self, texts: List[str], model: OpenAIConfig, **kwargs) -> List[Embedding]:
"""
Generate embeddings for the provided texts using the specified OpenAI model.
Lightweight wrapper over the OpenAI client.
:param texts: A list of texts to embed.
:param model: The model to use for embeddings.
:return: A list of embeddings.
"""
self.check_api_key()
try:
response: CreateEmbeddingResponse = self.client.embeddings.create(
input=texts,
model=model.model_name,
**kwargs
)
assert response.object == "list"
assert len(response.data) == len(texts)
embeddings = []
for embedding_response in response.data:
assert embedding_response.object == "embedding"
embeddings.append(Embedding(embedding_response.embedding))
return embeddings
except Exception as e:
print(f"An error occurred: {e}")
return None
def generate(self, model, system_message, prompt, **kwargs):
"""
        The main generation function: given the model config, system message, prompt and generation kwargs, generate a response.
        Args:
model (OpenAIConfig): The model to use for generation.
system_message (str): The system message to use for generation.
prompt (str): The prompt to use for generation.
kwargs (dict): Additional generation parameters.
"""
self.check_api_key()
temperature = kwargs.get("temperature", 0.1)
top_p = kwargs.get("top_p", 1)
frequency_penalty = kwargs.get("frequency_penalty", 0)
presence_penalty = kwargs.get("presence_penalty", 0)
max_new_tokens = kwargs.get("max_new_tokens")
# check if there are any generation parameters that are not supported
unsupported_params = [param for param in kwargs.keys() if param not in LLM_GENERATION_PARAMETERS]
if len(unsupported_params) > 0:
# log warning
logging.warning(f"Unused generation parameters sent as input: {unsupported_params}."\
f"For OpenAI, only the following parameters are supported: {LLM_GENERATION_PARAMETERS}")
params = {
"model": model.model_name,
"temperature": temperature,
"max_tokens": max_new_tokens,
"top_p": top_p,
"frequency_penalty": frequency_penalty,
"presence_penalty": presence_penalty,
}
if model.parsing_helper_tokens["start_token"]:
prompt += model.parsing_helper_tokens["start_token"]
messages = [
{
"role": "system",
"content": system_message
},
{
"role": "user",
"content": prompt
}
]
params["messages"] = messages
counter = 0
choice = None
        # initialise response so the exception logic doesn't error out when checking for an error in the response
response = {}
while counter <= 5:
try:
openai_headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json",
}
response = requests.post(
OPENAI_URL, headers=openai_headers, json=params, timeout=50
)
response = response.json()
choice = response["choices"][0]["message"]["content"].strip("'")
break
except<fim_suffix>
def list_finetuned(self, model_config, limit=100, **kwargs) -> List[FinetuneJob]:
self.check_api_key()
response = self.client.fine_tuning.jobs.list(limit=limit)
jobs = []
for job in response.data:
finetune_job = self.create_finetune_job(job, model_config)
jobs.append(finetune_job)
return jobs
def get_finetuned(self, job_id, model_config: OpenAIConfig) -> FinetuneJob:
self.check_api_key()
response = self.client.fine_tuning.jobs.retrieve(job_id)
finetune_job = self.create_finetune_job(response, model_config= model_config)
return finetune_job
def finetune(self, file, suffix, model_config, **kwargs) -> FinetuneJob:
self.check_api_key()
# Use the stream as a file
response = self.client.files.create(file=file, purpose='fine-tune')
training_file_id = response.id
if not model_config.base_model_for_sft:
model_config.base_model_for_sft = DEFAULT_DISTILLED_MODEL_NAME
# submit the finetuning job
finetuning_response: FineTuningJob = self.client.fine_tuning.jobs.create(training_file=training_file_id,
model=model_config.base_model_for_sft,
suffix=suffix)
finetune_job = self.create_finetune_job(finetuning_response, model_config)
return finetune_job
def create_finetune_job(self, response: FineTuningJob, model_config: OpenAIConfig) -> FinetuneJob:
finetuned_model_config = copy.deepcopy(model_config)
finetuned_model_config.model_name = response.fine_tuned_model
finetune_job = FinetuneJob(response.id, response.status, finetuned_model_config)
return finetune_job
def check_api_key(self):
# check if api key is not none
if not self.api_key:
# try to get the api key from the environment, maybe it has been set later
self.api_key = os.getenv("OPENAI_API_KEY")
if not self.api_key:
raise ValueError("OpenAI API key is not set")
if not self.client:
self.client = OpenAI(api_key=self.api_key)
<fim_middle> Exception as e:
if ("error" in response and
"code" in response["error"] and
response["error"]["code"] == 'invalid_api_key'):
raise Exception(f"The supplied OpenAI API key {self.api_key} is invalid")
if counter == 5:
raise Exception(f"OpenAI API failed to generate a response: {e}")
counter += 1
time.sleep(2 ** counter)
continue
|
Exception as e:
if ("error" in response and
"code" in response["error"] and
response["error"]["code"] == 'invalid_api_key'):
raise Exception(f"The supplied OpenAI API key {self.api_key} is invalid")
if counter == 5:
raise Exception(f"OpenAI API failed to generate a response: {e}")
counter += 1
time.sleep(2 ** counter)
continue
|
CATCH
|
prefix_full_suffix_func_empty_complete_current_block_no_evidence
|
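The completion above supplies the retry branch of OpenAI_API.generate: it surfaces an invalid-API-key error immediately, gives up after five attempts, and otherwise sleeps with exponential backoff before retrying. A self-contained sketch of that retry-with-backoff pattern is shown below; it wraps a generic callable rather than the real OpenAI endpoint, and every name in it is illustrative.

import time

def call_with_backoff(make_request, max_attempts: int = 5):
    # Retry a flaky callable, sleeping 2 ** attempt seconds between tries.
    attempt = 0
    while attempt <= max_attempts:
        try:
            return make_request()
        except Exception as exc:
            if attempt == max_attempts:
                raise RuntimeError(f"request failed after {max_attempts} retries: {exc}")
            attempt += 1
            time.sleep(2 ** attempt)

# Example: a request that fails twice before succeeding.
state = {"calls": 0}

def flaky():
    state["calls"] += 1
    if state["calls"] < 3:
        raise ConnectionError("transient error")
    return "ok"

print(call_with_backoff(flaky))  # prints "ok" after two backed-off retries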
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
                # required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
                # this is an additional check, because the loop above only covers required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
                        return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
            # Handle dict subclasses such as OrderedDict
            # the first check needs to be done to ensure origin has the __mro__ attribute
            elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
                # e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except<fim_suffix>
<fim_middle> ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
|
ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
|
CATCH
|
prefix_full_suffix_func_empty_complete_current_block_no_evidence
|
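The completion above is the except ValueError branch inside the list-handling path of Validator.instantiate: each element is converted on its own, and a failed conversion is re-raised as a TypeError that names the offending item. A standalone sketch of that per-item conversion pattern follows (the names are illustrative, not part of tanuki's API).

from typing import Any, Callable, List

def instantiate_list(data: List[Any], convert: Callable[[Any], Any]) -> List[Any]:
    items = []
    for item in data:
        try:
            items.append(convert(item))
        except ValueError:
            # Re-raise as TypeError so callers see a single, uniform failure mode.
            raise TypeError(f"Item {item!r} could not be converted with {convert.__name__}.")
    return items

print(instantiate_list(["1", "2", "3"], int))  # [1, 2, 3]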
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
                # required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
                # this is an additional check, because the loop above only covers required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
                        return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except<fim_suffix>
<fim_middle> TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
|
TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
|
CATCH
|
prefix_full_suffix_func_empty_complete_current_block_no_evidence
|
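The completion above fills in the except TypeError branch used when Validator.instantiate falls back to calling an arbitrary class with keyword arguments taken from a dictionary. A minimal sketch of that fallback follows; the Point class is a made-up example, not something from the library.

from typing import Any, Dict, Type

class Point:
    def __init__(self, x: int, y: int) -> None:
        self.x = x
        self.y = y

def from_dict(cls: Type, data: Dict[str, Any]) -> Any:
    # Attempt keyword construction; surface a descriptive error if the keys do not match.
    try:
        return cls(**data)
    except TypeError:
        raise TypeError(f"Failed to instantiate {cls.__name__} from dictionary.")

p = from_dict(Point, {"x": 1, "y": 2})
print(p.x, p.y)  # 1 2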
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
                # required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
                # this is an additional check, because the loop above only covers required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for<fim_suffix>
<fim_middle> arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
|
arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
|
FOR
|
prefix_full_suffix_func_empty_complete_current_block_no_evidence
|
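For reference, the Union branch completed in the row above simply tries each member of the annotation in order and returns the first conversion that succeeds. A minimal usage sketch, assuming the package is importable as tanuki.validator (per the filename in the row); the inputs are illustrative:
from typing import Optional, Union
from tanuki.validator import Validator
validator = Validator()
# "3" converts cleanly to int, the first Union member, so an int comes back.
print(validator.instantiate("3", Union[int, str]))  # 3
# None is handled before the Union branch and passed through unchanged.
print(validator.instantiate(None, Optional[int]))   # None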
<filename>tanuki_py/src/tanuki/bloom_filter.py<fim_prefix>import hashlib
import logging
import math
import numpy as np
from bitarray import bitarray
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
class BloomFilter:
def __init__(self,
persistence: IBloomFilterPersistence,
size=None,
hash_count=None,
expected_number_of_elements=None,
false_positive_probability=None):
if not persistence:
raise ValueError("Persistence cannot be None, it must be an instance of IBloomFilterPersistence")
if not size and not hash_count and not expected_number_of_elements and not false_positive_probability:
raise ValueError("Must specify either (size, hash_count) or (expected_number_of_elements, false_positive_probability")
if expected_number_of_elements and false_positive_probability:
size, hash_count = BloomFilter.optimal_bloom_filter_params(expected_number_of_elements, false_positive_probability)
if not size and not hash_count:
raise ValueError("Size and hash_count not set. This should never happen.")
self.size = size
self.hash_count = hash_count
self.bit_array, self.indices = self.init_bit_array(size)
self.persistence = persistence
def init_bit_array(self, size):
_bit_array = bitarray(size)
_bit_array.setall(0)
_indices = np.zeros(size, dtype=np.int32)
return _bit_array, _indices
def hash_functions(self, string):
# h1(x)
hash1 = int(hashlib.sha256(string.encode('utf-8')).hexdigest(), 16)
# h2(x)
hash2 = int(hashlib.md5(string.encode('utf-8')).hexdigest(), 16)
return hash1, hash2
def lookup(self, string):
hash1, hash2 = self.hash_functions(string)
for seed in range(self.hash_count):
index = (hash1 + seed * hash2) % self.size
#print(f"Lookup: Seed={seed}, Digest={index}, BitValue={self.bit_array[index]}")
if self.bit_array[index] == 0:
return False
return True
def add(self, string):
hash1, hash2 = self.hash_functions(string)
for<fim_suffix>
def save(self):
self.persistence.save(self.bit_array)
def load(self):
self.bit_array = self.persistence.load()
length_in_bytes = int(len(self.bit_array)/8)
expected_length = math.ceil(self.size / 8)
if length_in_bytes != expected_length:
logging.warning("Bit array length does not match expected size, and so might be corrupted. Reinitializing.")
self.bit_array, self.indices = self.init_bit_array(self.size)
self.save()
@staticmethod
def optimal_bloom_filter_params(n, p):
"""
Calculate the optimal bit array size (m) and number of hash functions (k)
for a Bloom filter.
n: expected number of items to be stored
p: acceptable false positive probability
Returns a tuple (m, k)
"""
m = - (n * math.log(p)) / (math.log(2) ** 2)
k = (m / n) * math.log(2)
return int(math.ceil(m)), int(math.ceil(k))<fim_middle> seed in range(self.hash_count):
index = (hash1 + seed * hash2) % self.size
self.bit_array[index] = 1
#print(f"Add: Seed={seed}, Digest={index}, BitValue={self.bit_array[index]}")
|
seed in range(self.hash_count):
index = (hash1 + seed * hash2) % self.size
self.bit_array[index] = 1
#print(f"Add: Seed={seed}, Digest={index}, BitValue={self.bit_array[index]}")
|
FOR
|
prefix_full_suffix_func_empty_complete_current_block_no_evidence
|
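The optimal_bloom_filter_params docstring above encodes the standard sizing formulas m = -n*ln(p) / (ln 2)^2 and k = (m/n)*ln 2. A self-contained sketch of that arithmetic for an assumed workload of 1,000 expected elements at a 1% false-positive rate:
import math
n, p = 1000, 0.01                            # expected elements, acceptable false-positive rate
m = -(n * math.log(p)) / (math.log(2) ** 2)  # optimal bit-array size in bits
k = (m / n) * math.log(2)                    # optimal number of hash functions
print(math.ceil(m), math.ceil(k))            # 9586 7
add and lookup then derive the k probe positions from the two base digests via (hash1 + seed * hash2) % size, the double-hashing scheme built on hash_functions above.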
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the non-Optional fields (i.e. those without default values)
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle other dict subclasses such as OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for<fim_suffix>
<fim_middle> item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
|
item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
|
FOR
|
prefix_full_suffix_func_empty_complete_current_block_no_evidence
|
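Alongside instantiation, the check_type/validate_output pair shown in these rows validates already-deserialized JSON against a type annotation. A small sketch under the same tanuki.validator import assumption:
from typing import Dict, List
from tanuki.validator import Validator
validator = Validator()
# The JSON string is deserialized first, then checked structurally against the annotation.
print(validator.validate_output('{"a": 1}', Dict[str, int]))  # True
# Element-wise checks reject mixed content.
print(validator.check_type([1, "2"], List[int]))              # False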
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the non-Optional fields (i.e. those without default values)
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle other dict subclasses such as OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for<fim_suffix>
<fim_middle> i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
|
i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
|
FOR
|
prefix_full_suffix_func_empty_complete_current_block_no_evidence
|
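The dictionary branch of instantiate shown in these rows also covers dataclasses: keys matching declared fields are recursively instantiated against the field's type hint before the class is constructed, and unknown keys are dropped. A minimal sketch with a hypothetical Point dataclass, under the same import assumption:
from dataclasses import dataclass
from tanuki.validator import Validator
@dataclass
class Point:
    x: int
    y: int
validator = Validator()
# "1" is coerced to int via the field's type hint; the unknown key "z" is ignored.
print(validator.instantiate({"x": "1", "y": 2, "z": 9}, Point))  # Point(x=1, y=2)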
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the non-Optional fields (i.e. those without default values)
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle other dict subclasses such as OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for<fim_suffix>
<fim_middle> item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
|
item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
|
FOR
|
prefix_full_suffix_func_empty_complete_current_block_no_evidence
|
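The row above completes the list branch: each element is instantiated against the subscripted item type and re-checked with check_type before being appended. A usage sketch under the same import assumption:
from typing import List
from tanuki.validator import Validator
validator = Validator()
# Each element is coerced to int and validated before the plain list is returned.
print(validator.instantiate(["1", 2, 3.0], List[int]))  # [1, 2, 3]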
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
                # required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for<fim_suffix>
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
                if target_type is float:
                    try:
                        return float(data)
                    except (ValueError, TypeError):
                        pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
            # Handle other dict subclasses such as OrderedDict
            # the first check is needed to ensure origin has the __mro__ attribute
            elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
                if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle> base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
|
base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
|
FOR
|
prefix_full_suffix_func_empty_complete_current_block_no_evidence
|
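The Validator above leans on typing.get_origin and typing.get_args to peel apart parameterised annotations before dispatching on the container kind. Below is a stripped-down, self-contained sketch of that recursion; it is not the tanuki API itself and deliberately ignores PEP 604 unions and fixed-length tuples.

from typing import Any, Union, get_args, get_origin

def conforms(value: Any, annotation: Any) -> bool:
    # Plain classes are checked directly; parameterised types are unpacked first.
    if annotation is Any:
        return True
    origin, args = get_origin(annotation), get_args(annotation)
    if origin is None:                      # e.g. int, str, a user-defined class
        return isinstance(value, annotation)
    if origin is Union:                     # Optional[X] is Union[X, None]
        return any(conforms(value, arg) for arg in args)
    if origin in (list, set, frozenset):
        item_type = args[0] if args else Any
        return isinstance(value, origin) and all(conforms(v, item_type) for v in value)
    if origin is dict:
        key_type, value_type = args if len(args) == 2 else (Any, Any)
        return isinstance(value, dict) and all(
            conforms(k, key_type) and conforms(v, value_type) for k, v in value.items())
    return isinstance(value, origin)

assert conforms([1, 2, 3], list[int])
assert conforms({"a": 1.0}, dict[str, float])
assert not conforms({"a": "x"}, dict[str, float])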
<filename>tanuki_py/src/tanuki/language_models/openai_api.py<fim_prefix>from typing import List
import logging
import time
# import abstract base class
from openai import OpenAI
from openai.types import CreateEmbeddingResponse
from openai.types.fine_tuning import FineTuningJob
from tanuki.language_models.llm_finetune_api_abc import LLM_Finetune_API
from tanuki.models.embedding import Embedding
from tanuki.language_models.embedding_api_abc import Embedding_API
from tanuki.language_models.llm_api_abc import LLM_API
import os
from tanuki.constants import DEFAULT_DISTILLED_MODEL_NAME
from tanuki.language_models.llm_configs.openai_config import OpenAIConfig
from tanuki.models.finetune_job import FinetuneJob
import copy
OPENAI_URL = "https://api.openai.com/v1/chat/completions"
import requests
LLM_GENERATION_PARAMETERS = ["temperature", "top_p", "max_new_tokens", "frequency_penalty", "presence_penalty"]
class OpenAI_API(LLM_API, Embedding_API, LLM_Finetune_API):
def __init__(self) -> None:
# initialise the abstract base class
super().__init__()
self.api_key = os.environ.get("OPENAI_API_KEY")
self.client = None
def embed(self, texts: List[str], model: OpenAIConfig, **kwargs) -> List[Embedding]:
"""
Generate embeddings for the provided texts using the specified OpenAI model.
Lightweight wrapper over the OpenAI client.
:param texts: A list of texts to embed.
:param model: The model to use for embeddings.
:return: A list of embeddings.
"""
self.check_api_key()
try:
response: CreateEmbeddingResponse = self.client.embeddings.create(
input=texts,
model=model.model_name,
**kwargs
)
assert response.object == "list"
assert len(response.data) == len(texts)
embeddings = []
for embedding_response in response.data:
assert embedding_response.object == "embedding"
embeddings.append(Embedding(embedding_response.embedding))
return embeddings
except Exception as e:
print(f"An error occurred: {e}")
return None
def generate(self, model, system_message, prompt, **kwargs):
"""
        The main generation function: given the model, system message, prompt and generation kwargs, generate a response.
Args
model (OpenAIConfig): The model to use for generation.
system_message (str): The system message to use for generation.
prompt (str): The prompt to use for generation.
kwargs (dict): Additional generation parameters.
"""
self.check_api_key()
temperature = kwargs.get("temperature", 0.1)
top_p = kwargs.get("top_p", 1)
frequency_penalty = kwargs.get("frequency_penalty", 0)
presence_penalty = kwargs.get("presence_penalty", 0)
max_new_tokens = kwargs.get("max_new_tokens")
# check if there are any generation parameters that are not supported
unsupported_params = [param for param in kwargs.keys() if param not in LLM_GENERATION_PARAMETERS]
if len(unsupported_params) > 0:
# log warning
logging.warning(f"Unused generation parameters sent as input: {unsupported_params}."\
f"For OpenAI, only the following parameters are supported: {LLM_GENERATION_PARAMETERS}")
params = {
"model": model.model_name,
"temperature": temperature,
"max_tokens": max_new_tokens,
"top_p": top_p,
"frequency_penalty": frequency_penalty,
"presence_penalty": presence_penalty,
}
if model.parsing_helper_tokens["start_token"]:
prompt += model.parsing_helper_tokens["start_token"]
messages = [
{
"role": "system",
"content": system_message
},
{
"role": "user",
"content": prompt
}
]
params["messages"] = messages
counter = 0
choice = None
        # initialise response so the exception logic doesn't error out when checking for an error in the response
response = {}
while<fim_suffix>
def list_finetuned(self, model_config, limit=100, **kwargs) -> List[FinetuneJob]:
self.check_api_key()
response = self.client.fine_tuning.jobs.list(limit=limit)
jobs = []
for job in response.data:
finetune_job = self.create_finetune_job(job, model_config)
jobs.append(finetune_job)
return jobs
def get_finetuned(self, job_id, model_config: OpenAIConfig) -> FinetuneJob:
self.check_api_key()
response = self.client.fine_tuning.jobs.retrieve(job_id)
finetune_job = self.create_finetune_job(response, model_config= model_config)
return finetune_job
def finetune(self, file, suffix, model_config, **kwargs) -> FinetuneJob:
self.check_api_key()
# Use the stream as a file
response = self.client.files.create(file=file, purpose='fine-tune')
training_file_id = response.id
if not model_config.base_model_for_sft:
model_config.base_model_for_sft = DEFAULT_DISTILLED_MODEL_NAME
# submit the finetuning job
finetuning_response: FineTuningJob = self.client.fine_tuning.jobs.create(training_file=training_file_id,
model=model_config.base_model_for_sft,
suffix=suffix)
finetune_job = self.create_finetune_job(finetuning_response, model_config)
return finetune_job
def create_finetune_job(self, response: FineTuningJob, model_config: OpenAIConfig) -> FinetuneJob:
finetuned_model_config = copy.deepcopy(model_config)
finetuned_model_config.model_name = response.fine_tuned_model
finetune_job = FinetuneJob(response.id, response.status, finetuned_model_config)
return finetune_job
def check_api_key(self):
# check if api key is not none
if not self.api_key:
# try to get the api key from the environment, maybe it has been set later
self.api_key = os.getenv("OPENAI_API_KEY")
if not self.api_key:
raise ValueError("OpenAI API key is not set")
if not self.client:
self.client = OpenAI(api_key=self.api_key)
<fim_middle> counter <= 5:
try:
openai_headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json",
}
response = requests.post(
OPENAI_URL, headers=openai_headers, json=params, timeout=50
)
response = response.json()
choice = response["choices"][0]["message"]["content"].strip("'")
break
except Exception as e:
if ("error" in response and
"code" in response["error"] and
response["error"]["code"] == 'invalid_api_key'):
raise Exception(f"The supplied OpenAI API key {self.api_key} is invalid")
if counter == 5:
raise Exception(f"OpenAI API failed to generate a response: {e}")
counter += 1
time.sleep(2 ** counter)
continue
|
counter <= 5:
try:
openai_headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json",
}
response = requests.post(
OPENAI_URL, headers=openai_headers, json=params, timeout=50
)
response = response.json()
choice = response["choices"][0]["message"]["content"].strip("'")
break
except Exception as e:
if ("error" in response and
"code" in response["error"] and
response["error"]["code"] == 'invalid_api_key'):
raise Exception(f"The supplied OpenAI API key {self.api_key} is invalid")
if counter == 5:
raise Exception(f"OpenAI API failed to generate a response: {e}")
counter += 1
time.sleep(2 ** counter)
continue
|
WHILE
|
prefix_full_suffix_func_empty_complete_current_block_no_evidence
|
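The generate method above retries the chat-completion request up to five times, sleeping 2 ** counter seconds between attempts and re-raising once the budget is exhausted. The same retry-with-exponential-backoff pattern, pulled out into a small helper; the helper name and the commented call are illustrative, not part of the codebase above.

import time
from typing import Callable, TypeVar

T = TypeVar("T")

def retry_with_backoff(call: Callable[[], T], max_attempts: int = 5) -> T:
    # Retry a flaky call, doubling the sleep after each failure.
    last_error: Exception | None = None
    for attempt in range(1, max_attempts + 1):
        try:
            return call()
        except Exception as e:              # in production, catch narrower exception types
            last_error = e
            if attempt == max_attempts:
                break
            time.sleep(2 ** attempt)        # 2s, 4s, 8s, ... between attempts
    raise RuntimeError(f"call failed after {max_attempts} attempts") from last_error

# Usage sketch: wrap the HTTP request that produces the completion text, e.g.
# choice = retry_with_backoff(lambda: post_chat_completion(params))  # hypothetical helper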
<filename>UHGEval/uhgeval/dataset/truthfulqa.py<fim_prefix># @Author : YeZhaohui Wang
# @Email : [email protected]
import csv
import json
import os
import random
from uhgeval.dataset.base import BaseDataset
class TruthfunQAGeneration(BaseDataset):
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
self.data = []
if os.path.isfile(path):
with open(path, 'r', encoding='utf-8-sig') as file:
csv_reader = csv.DictReader(file)
id = 1
for row in csv_reader:
row['id'] = id
id += 1
self.data.append(row)
if shuffle:
random.seed(seed)
random.shuffle(self.data)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: int | slice) -> dict | list[dict]:
return self.data[key]
def load(self) -> list[dict]:
return self.data[:]
class TruthfunQAMC1(BaseDataset):
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
self.data<fim_suffix><fim_middle> = []
|
= []
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song
# @Email : [email protected]
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
        return (0, [], []) if with_kw_list else 0
    appeared_kws = [kw for kw in kws if kw in reference]
    precision = len(appeared_kws) / len(kws)
    return (precision, appeared_kws, kws) if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return<fim_suffix><fim_middle> accuracy, precision, recall, f1
|
accuracy, precision, recall, f1
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
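classifications derives all four scores from the raw true-positive, false-positive and false-negative counts. A small worked example with synthetic labels makes the arithmetic concrete:

references  = [1, 1, 0, 0, 1, 0]    # ground-truth labels (synthetic, for illustration only)
predictions = [1, 0, 0, 1, 1, 0]    # model predictions

tp = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 1)   # 2
fp = sum(1 for r, p in zip(references, predictions) if r == 0 and p == 1)   # 1
fn = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 0)   # 1

precision = tp / (tp + fp)                                   # 2/3 ~ 0.667
recall    = tp / (tp + fn)                                   # 2/3 ~ 0.667
f1        = 2 * precision * recall / (precision + recall)    # 2/3 ~ 0.667
accuracy  = sum(r == p for r, p in zip(references, predictions)) / len(predictions)  # 4/6
print(accuracy, precision, recall, f1)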
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song
# @Email : [email protected]
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
        return (0, [], []) if with_kw_list else 0
    appeared_kws = [kw for kw in kws if kw in reference]
    precision = len(appeared_kws) / len(kws)
    return (precision, appeared_kws, kws) if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1<fim_suffix><fim_middle> = 2 * (precision * recall) / (precision + recall)
|
= 2 * (precision * recall) / (precision + recall)
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song
# @Email : [email protected]
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))<fim_suffix><fim_middle>
| null |
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song
# @Email : [email protected]
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
        return (0, [], []) if with_kw_list else 0
    appeared_kws = [kw for kw in kws if kw in reference]
    precision = len(appeared_kws) / len(kws)
    return (precision, appeared_kws, kws) if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive<fim_suffix><fim_middle> = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
|
= sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song
# @Email : [email protected]
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
        return (0, [], []) if with_kw_list else 0
    appeared_kws = [kw for kw in kws if kw in reference]
    precision = len(appeared_kws) / len(kws)
    return (precision, appeared_kws, kws) if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy<fim_suffix><fim_middle> = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
|
= sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song
# @Email : [email protected]
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
        return (0, [], []) if with_kw_list else 0
    appeared_kws = [kw for kw in kws if kw in reference]
    precision = len(appeared_kws) / len(kws)
    return (precision, appeared_kws, kws) if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive<fim_suffix><fim_middle> = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
|
= sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song
# @Email : [email protected]
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
        return (0, [], []) if with_kw_list else 0
    appeared_kws = [kw for kw in kws if kw in reference]
    precision = len(appeared_kws) / len(kws)
    return (precision, appeared_kws, kws) if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision<fim_suffix><fim_middle> = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
|
= true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UHGEval/uhgeval/dataset/truthfulqa.py<fim_prefix># @Author : YeZhaohui Wang
# @Email : [email protected]
import csv
import json
import os
import random
from uhgeval.dataset.base import BaseDataset
class TruthfunQAGeneration(BaseDataset):
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
self.data = []
if os.path.isfile(path):
with open(path, 'r', encoding='utf-8-sig') as file:
csv_reader = csv.DictReader(file)
id = 1
for row in csv_reader:
row['id'] = id
id += 1
self.data.append(row)
if shuffle:
random.seed(seed)
random.shuffle(self.data)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: int | slice) -> dict | list[dict]:
return self.data[key]
def load(self) -> list[dict]:
return self.data[:]
class TruthfunQAMC1(BaseDataset):
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
self.data = []
id = 1
if os.path.isfile(path):
with open(path, encoding='utf-8') as f:
self.data = json.load(f)
for row in self.data:
row['id'] = id
id += 1
if shuffle:
random.seed(seed)
random.shuffle(self.data)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: int | slice) -> dict | list[dict]:
return self.data[key]
def load(self) -> list[dict]:
return<fim_suffix><fim_middle> self.data[:]
|
self.data[:]
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song
# @Email : [email protected]
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return<fim_suffix><fim_middle> result
|
result
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song
# @Email : [email protected]
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
        return (0, [], []) if with_kw_list else 0
    appeared_kws = [kw for kw in kws if kw in reference]
    precision = len(appeared_kws) / len(kws)
    return (precision, appeared_kws, kws) if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if<fim_suffix><fim_middle> precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
|
precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
|
IF
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song
# @Email : [email protected]
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:<fim_suffix><fim_middle>
result = func(*args, **kwargs)
return result
|
result = func(*args, **kwargs)
return result
|
TRY
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song
# @Email : [email protected]
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except<fim_suffix><fim_middle> Exception as e:
logger.warning(repr(e))
|
Exception as e:
logger.warning(repr(e))
|
CATCH
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
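catch_all_exceptions logs any exception raised by the wrapped metric and falls through, so a failed metric call quietly yields None. The same decorator shape is sketched below with the standard logging module and an invented sample metric; both are purely illustrative.

import functools
import logging

logger = logging.getLogger(__name__)

def swallow_exceptions(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.warning(repr(e))
            return None                     # explicit: failed metrics yield None
    return wrapper

@swallow_exceptions
def token_overlap(a: str, b: str) -> float:
    return len(set(a.split()) & set(b.split())) / len(set(b.split()))

print(token_overlap("a b c", "b c d"))      # 0.666...
print(token_overlap("a b c", ""))           # logs ZeroDivisionError, prints None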
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song
# @Email : [email protected]
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
        return (0, [], []) if with_kw_list else 0
    appeared_kws = [kw for kw in kws if kw in reference]
    precision = len(appeared_kws) / len(kws)
    return (precision, appeared_kws, kws) if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""<fim_suffix><fim_middle>
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
|
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
|
BLOCK_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/structures/image_list.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import division
from typing import Any, List, Tuple
import torch
from torch import device
from torch.nn import functional as F
from detectron2.layers.wrappers import shapes_to_tensor
class ImageList(object):
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
This works by padding the images to the same size.
    The original size of each image is stored in `image_sizes`.
Attributes:
image_sizes (list[tuple[int, int]]): each tuple is (h, w).
During tracing, it becomes list[Tensor] instead.
"""
def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):
"""
Arguments:
tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1
image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can
be smaller than (H, W) due to padding.
"""
self.tensor = tensor
self.image_sizes = image_sizes
def __len__(self) -> int:
return len(self.image_sizes)
def __getitem__(self, idx) -> torch.Tensor:
"""<fim_suffix><fim_middle>
Access the individual image in its original size.
Args:
idx: int or slice
Returns:
Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1
"""
|
Access the individual image in its original size.
Args:
idx: int or slice
Returns:
Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1
"""
|
BLOCK_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
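ImageList batches images of different sizes by padding them to a shared (H, W) and remembering each original size. A minimal sketch of that padding step follows; it is not detectron2's ImageList.from_tensors, only the core idea.

from typing import List, Tuple
import torch
import torch.nn.functional as F

def pad_and_stack(images: List[torch.Tensor]) -> Tuple[torch.Tensor, List[Tuple[int, int]]]:
    # images: list of (C, H_i, W_i) tensors -> (N, C, H_max, W_max) plus original sizes.
    image_sizes = [(int(img.shape[-2]), int(img.shape[-1])) for img in images]
    max_h = max(h for h, _ in image_sizes)
    max_w = max(w for _, w in image_sizes)
    padded = [
        F.pad(img, (0, max_w - w, 0, max_h - h))   # zero-pad on the right and bottom
        for img, (h, w) in zip(images, image_sizes)
    ]
    return torch.stack(padded, dim=0), image_sizes

# batch, sizes = pad_and_stack([torch.rand(3, 480, 640), torch.rand(3, 600, 500)])
# batch.shape == (2, 3, 600, 640); sizes == [(480, 640), (600, 500)]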
<filename>UniRef/detectron2/solver/build.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import logging
from collections import defaultdict
from enum import Enum
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Type, Union
import torch
from fvcore.common.param_scheduler import CosineParamScheduler, MultiStepParamScheduler
from detectron2.config import CfgNode
from .lr_scheduler import LRMultiplier, WarmupParamScheduler
_GradientClipperInput = Union[torch.Tensor, Iterable[torch.Tensor]]
_GradientClipper = Callable[[_GradientClipperInput], None]
class GradientClipType(Enum):
VALUE = "value"
NORM = "norm"
def _create_gradient_clipper(cfg: CfgNode) -> _GradientClipper:
"""
Creates a gradient clipping closure to clip by value or by norm,
according to the provided config.
"""
cfg = copy.deepcopy(cfg)
def clip_grad_norm(p: _GradientClipperInput):
torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE)
def clip_grad_value(p: _GradientClipperInput):
torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE)
_GRADIENT_CLIP_TYPE_TO_CLIPPER = {
GradientClipType.VALUE: clip_grad_value,
GradientClipType.NORM: clip_grad_norm,
}
return _GRADIENT_CLIP_TYPE_TO_CLIPPER[GradientClipType(cfg.CLIP_TYPE)]
def _generate_optimizer_class_with_gradient_clipping(
optimizer: Type[torch.optim.Optimizer],
*,
per_param_clipper: Optional[_GradientClipper] = None,
global_clipper: Optional[_GradientClipper] = None,
) -> Type[torch.optim.Optimizer]:
"""
Dynamically creates a new type that inherits the type of a given instance
and overrides the `step` method to add gradient clipping
"""
assert (
per_param_clipper is None or global_clipper is None
), "Not allowed to use both per-parameter clipping and global clipping"
def optimizer_wgc_step(self, closure=None):
if per_param_clipper is not None:
for group in self.param_groups:
for p in group["params"]:
per_param_clipper(p)
else:
# global clipper for future use with detr
# (https://github.com/facebookresearch/detr/pull/287)
all_params = itertools.chain(*[g["params"] for g in self.param_groups])
global_clipper(all_params)
super(type(self), self).step(closure)
OptimizerWithGradientClip = type(
optimizer.__name__ + "WithGradientClip",
(optimizer,),
{"step": optimizer_wgc_step},
)
return OptimizerWithGradientClip
def maybe_add_gradient_clipping(
cfg: CfgNode, optimizer: Type[torch.optim.Optimizer]
) -> Type[torch.optim.Optimizer]:
"""
If gradient clipping is enabled through config options, wraps the existing
optimizer type to become a new dynamically created class OptimizerWithGradientClip
that inherits the given optimizer and overrides the `step` method to
include gradient clipping.
Args:
cfg: CfgNode, configuration options
optimizer: type. A subclass of torch.optim.Optimizer
Return:
type: either the input `optimizer` (if gradient clipping is disabled), or
a subclass of it with gradient clipping included in the `step` method.
"""
if not cfg.SOLVER.CLIP_GRADIENTS.ENABLED:
return optimizer
if isinstance(optimizer, torch.optim.Optimizer):
optimizer_type = type(optimizer)
else:
assert issubclass(optimizer, torch.optim.Optimizer), optimizer
optimizer_type = optimizer
grad_clipper = _create_gradient_clipper(cfg.SOLVER.CLIP_GRADIENTS)
OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping(
optimizer_type, per_param_clipper=grad_clipper
)
if isinstance(optimizer, torch.optim.Optimizer):
optimizer.__class__ = OptimizerWithGradientClip # a bit hacky, not recommended
return optimizer
else:
return OptimizerWithGradientClip
def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:
"""
Build an optimizer from config.
"""
params = get_default_optimizer_params(
model,
base_lr=cfg.SOLVER.BASE_LR,
weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM,
bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR,
weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS,
)
return maybe_add_gradient_clipping(cfg, torch.optim.SGD)(
params,
lr=cfg.SOLVER.BASE_LR,
momentum=cfg.SOLVER.MOMENTUM,
nesterov=cfg.SOLVER.NESTEROV,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
)
def get_default_optimizer_params(
model: torch.nn.Module,
base_lr: Optional[float] = None,
weight_decay: Optional[float] = None,
weight_decay_norm: Optional[float] = None,
bias_lr_factor: Optional[float] = 1.0,
weight_decay_bias: Optional[float] = None,
overrides: Optional[Dict[str, Dict[str, float]]] = None,
) -> List[Dict[str, Any]]:
"""
Get default param list for optimizer, with support for a few types of
overrides. If no overrides are needed, this is equivalent to `model.parameters()`.
Args:
base_lr: lr for every group by default. Can be omitted to use the one in optimizer.
weight_decay: weight decay for every group by default. Can be omitted to use the one
in optimizer.
weight_decay_norm: override weight decay for params in normalization layers
bias_lr_factor: multiplier of lr for bias parameters.
weight_decay_bias: override weight decay for bias parameters
overrides: if not `None`, provides values for optimizer hyperparameters
(LR, weight decay) for module parameters with a given name; e.g.
``{"embedding": {"lr": 0.01, "weight_decay": 0.1}}`` will set the LR and
weight decay values for all module parameters named `embedding`.
For common detection models, ``weight_decay_norm`` is the only option
needed to be set. ``bias_lr_factor,weight_decay_bias`` are legacy settings
from Detectron1 that are not found useful.
Example:
::
torch.optim.SGD(get_default_optimizer_params(model, weight_decay_norm=0),
lr=0.01, weight_decay=1e-4, momentum=0.9)
"""
if overrides is None:
overrides = {}
defaults = {}
if base_lr is not None:
defaults["lr"] = base_lr
if weight_decay is not None:
defaults["weight_decay"] = weight_decay
bias_overrides = {}
if bias_lr_factor is not None and bias_lr_factor != 1.0:
# NOTE: unlike Detectron v1, we now by default make bias hyperparameters
# exactly the same as regular weights.
if base_lr is None:
raise ValueError("bias_lr_factor requires base_lr")
bias_overrides["lr"] = base_lr * bias_lr_factor
if weight_decay_bias is not None:
bias_overrides["weight_decay"] = weight_decay_bias
if len(bias_overrides):
if "bias" in overrides:
raise ValueError("Conflicting overrides for 'bias'")
overrides["bias"] = bias_overrides
norm_module_types = (
torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.SyncBatchNorm,
# NaiveSyncBatchNorm inherits from BatchNorm2d
torch.nn.GroupNorm,
torch.nn.InstanceNorm1d,
torch.nn.InstanceNorm2d,
torch.nn.InstanceNorm3d,
torch.nn.LayerNorm,
torch.nn.LocalResponseNorm,
)
params: List[Dict[str, Any]] = []
memo: Set[torch.nn.parameter.Parameter] = set()
for module in model.modules():
for module_param_name, value in module.named_parameters(recurse=False):
if not value.requires_grad:
continue
# Avoid duplicating parameters
if value in memo:
continue
memo.add(value)
hyperparams = copy.copy(defaults)
if isinstance(module, norm_module_types) and weight_decay_norm is not None:
hyperparams["weight_decay"] = weight_decay_norm
hyperparams.update(overrides.get(module_param_name, {}))
params.append({"params": [value], **hyperparams})
return reduce_param_groups(params)
def _expand_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
# Transform parameter groups into per-parameter structure.
# Later items in `params` can overwrite parameters set in previous items.
ret = defaultdict(dict)
for item in params:
assert "params" in item
cur_params = {x: y for x, y in item.items() if x != "params"}
for param in item["params"]:
ret[param].update({"params": [param], **cur_params})
return list(ret.values())
def reduce_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
# Reorganize the parameter groups and merge duplicated groups.
# The number of parameter groups needs to be as small as possible in order
# to efficiently use the PyTorch multi-tensor optimizer. Therefore instead
# of using a parameter_group per single parameter, we reorganize the
# parameter groups and merge duplicated groups. This approach speeds
# up multi-tensor optimizer significantly.
params = _expand_param_groups(params)
groups = defaultdict(list) # re-group all parameter groups by their hyperparams
for item in params:
cur_params = tuple((x, y) for x, y in item.items() if x != "params")
groups[cur_params].extend(item["params"])
ret = []
for param_keys, param_values in groups.items():
cur = {kv[0]: kv[1] for kv in param_keys}
cur["params"] = param_values
ret.append(cur)
return ret
def build_lr_scheduler(
cfg: CfgNode, optimizer: torch.optim.Optimizer
) -> torch.optim.lr_scheduler._LRScheduler:
"""<fim_suffix><fim_middle>
Build a LR scheduler from config.
"""
|
Build a LR scheduler from config.
"""
|
BLOCK_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
once this number is exceeded, the id is considered lost forever
min_box_rel_dim: a relative dimension (percentage); a bbox smaller than this
is removed from tracking
min_instance_period: an instance is only shown after this number of periods
since it first appears in the video
track_iou_threshold: IoU threshold; a bbox pair with IoU below this value is
removed from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of dicts, one per bbox pair, storing the fields described above
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _reset_fields(self):
"""
Before each update call, reset fields first
"""
self._matched_idx = set()
self._matched_ID = set()
self._untracked_prev_idx = set(range(len(self._prev_instances)))
def _assign_new_id(self, instances: Instances) -> Instances:
"""<fim_suffix><fim_middle>
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
"""
|
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
"""
|
BLOCK_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/structures/masks.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
return np.zeros((height, width)).astype(np.bool)
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
return mask_util.decode(rle).astype(np.bool)
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
The following usages are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
if len(masks):
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
else:
return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
However, we observe no difference in accuracy,
and BitMasks requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
output = output >= 0.5
return output
def get_bounding_boxes(self) -> Boxes:
"""<fim_suffix><fim_middle>
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, its bounding box will be all zero.
"""
|
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, its bounding box will be all zero.
"""
|
BLOCK_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
once this number is exceeded, the id is considered lost forever
min_box_rel_dim: a relative dimension (percentage); a bbox smaller than this
is removed from tracking
min_instance_period: an instance is only shown after this number of periods
since it first appears in the video
track_iou_threshold: IoU threshold; a bbox pair with IoU below this value is
removed from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of dicts, one per bbox pair, storing the fields described above
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _reset_fields(self):
"""<fim_suffix><fim_middle>
Before each update call, reset fields first
"""
|
Before each update call, reset fields first
"""
|
BLOCK_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/structures/boxes.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import math
import numpy as np
from enum import IntEnum, unique
from typing import List, Tuple, Union
import torch
from torch import device
_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray]
@unique
class BoxMode(IntEnum):
"""
Enum of different ways to represent a box.
"""
XYXY_ABS = 0
"""
(x0, y0, x1, y1) in absolute floating points coordinates.
The coordinates in range [0, width or height].
"""
XYWH_ABS = 1
"""
(x0, y0, w, h) in absolute floating points coordinates.
"""
XYXY_REL = 2
"""
Not yet supported!
(x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.
"""
XYWH_REL = 3
"""
Not yet supported!
(x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.
"""
XYWHA_ABS = 4
"""
(xc, yc, w, h, a) in absolute floating points coordinates.
(xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.
"""
@staticmethod
def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType:
"""
Args:
box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5
from_mode, to_mode (BoxMode)
Returns:
The converted box of the same type.
"""
if from_mode == to_mode:
return box
original_type = type(box)
is_numpy = isinstance(box, np.ndarray)
single_box = isinstance(box, (list, tuple))
if single_box:
assert len(box) == 4 or len(box) == 5, (
"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,"
" where k == 4 or 5"
)
arr = torch.tensor(box)[None, :]
else:
# avoid modifying the input box
if is_numpy:
arr = torch.from_numpy(np.asarray(box)).clone()
else:
arr = box.clone()
assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [
BoxMode.XYXY_REL,
BoxMode.XYWH_REL,
], "Relative mode not yet supported!"
if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:
assert (
arr.shape[-1] == 5
), "The last dimension of input shape must be 5 for XYWHA format"
original_dtype = arr.dtype
arr = arr.double()
w = arr[:, 2]
h = arr[:, 3]
a = arr[:, 4]
c = torch.abs(torch.cos(a * math.pi / 180.0))
s = torch.abs(torch.sin(a * math.pi / 180.0))
# This basically computes the horizontal bounding rectangle of the rotated box
new_w = c * w + s * h
new_h = c * h + s * w
# convert center to top-left corner
arr[:, 0] -= new_w / 2.0
arr[:, 1] -= new_h / 2.0
# bottom-right corner
arr[:, 2] = arr[:, 0] + new_w
arr[:, 3] = arr[:, 1] + new_h
arr = arr[:, :4].to(dtype=original_dtype)
elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:
original_dtype = arr.dtype
arr = arr.double()
arr[:, 0] += arr[:, 2] / 2.0
arr[:, 1] += arr[:, 3] / 2.0
angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)
arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)
else:
if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:
arr[:, 2] += arr[:, 0]
arr[:, 3] += arr[:, 1]
elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:
arr[:, 2] -= arr[:, 0]
arr[:, 3] -= arr[:, 1]
else:
raise NotImplementedError(
"Conversion from BoxMode {} to {} is not supported yet".format(
from_mode, to_mode
)
)
if single_box:
return original_type(arr.flatten().tolist())
if is_numpy:
return arr.numpy()
else:
return arr
class Boxes:
"""
This structure stores a list of boxes as a Nx4 torch.Tensor.
It supports some common methods about boxes
(`area`, `clip`, `nonempty`, etc),
and also behaves like a Tensor
(supports indexing, `to(device)`, `.device`, and iteration over all boxes)
Attributes:
tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
# Use reshape, so we don't end up creating a new tensor that does not depend on
# the inputs (and consequently confuses jit)
tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)
assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()
self.tensor = tensor
def clone(self) -> "Boxes":
"""
Clone the Boxes.
Returns:
Boxes
"""
return Boxes(self.tensor.clone())
def to(self, device: torch.device):
# Boxes are assumed float32 and does not support to(dtype)
return Boxes(self.tensor.to(device=device))
def area(self) -> torch.Tensor:
"""
Computes the area of all the boxes.
Returns:
torch.Tensor: a vector with areas of each box.
"""
box = self.tensor
area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])
return area
def clip(self, box_size: Tuple[int, int]) -> None:
"""
Clip (in place) the boxes by limiting x coordinates to the range [0, width]
and y coordinates to the range [0, height].
Args:
box_size (height, width): The clipping box's size.
"""
assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!"
h, w = box_size
x1 = self.tensor[:, 0].clamp(min=0, max=w)
y1 = self.tensor[:, 1].clamp(min=0, max=h)
x2 = self.tensor[:, 2].clamp(min=0, max=w)
y2 = self.tensor[:, 3].clamp(min=0, max=h)
self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)
def nonempty(self, threshold: float = 0.0) -> torch.Tensor:
"""
Find boxes that are non-empty.
A box is considered empty if either of its sides is no larger than threshold.
Returns:
Tensor:
a binary vector which represents whether each box is empty
(False) or non-empty (True).
"""
box = self.tensor
widths = box[:, 2] - box[:, 0]
heights = box[:, 3] - box[:, 1]
keep = (widths > threshold) & (heights > threshold)
return keep
def __getitem__(self, item) -> "Boxes":
"""<fim_suffix><fim_middle>
Args:
item: int, slice, or a BoolTensor
Returns:
Boxes: Create a new :class:`Boxes` by indexing.
The following usages are allowed:
1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
Note that the returned Boxes might share storage with this Boxes,
subject to Pytorch's indexing semantics.
"""
|
Args:
item: int, slice, or a BoolTensor
Returns:
Boxes: Create a new :class:`Boxes` by indexing.
The following usages are allowed:
1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
Note that the returned Boxes might share storage with this Boxes,
subject to Pytorch's indexing semantics.
"""
|
BLOCK_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/structures/instances.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import itertools
from typing import Any, Dict, List, Tuple, Union
import torch
class Instances:
"""
This class represents a list of instances in an image.
It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields".
All fields must have the same ``__len__`` which is the number of instances.
All other (non-field) attributes of this class are considered private:
they must start with '_' and are not modifiable by a user.
Some basic usage:
1. Set/get/check a field:
.. code-block:: python
instances.gt_boxes = Boxes(...)
print(instances.pred_masks) # a tensor of shape (N, H, W)
print('gt_masks' in instances)
2. ``len(instances)`` returns the number of instances
3. Indexing: ``instances[indices]`` will apply the indexing on all the fields
and returns a new :class:`Instances`.
Typically, ``indices`` is an integer vector of indices,
or a binary mask of length ``num_instances``
.. code-block:: python
category_3_detections = instances[instances.pred_classes == 3]
confident_detections = instances[instances.scores > 0.9]
"""
def __init__(self, image_size: Tuple[int, int], **kwargs: Any):
"""<fim_suffix><fim_middle>
Args:
image_size (height, width): the spatial size of the image.
kwargs: fields to add to this `Instances`.
"""
|
Args:
image_size (height, width): the spatial size of the image.
kwargs: fields to add to this `Instances`.
"""
|
BLOCK_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/structures/masks.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
return np.zeros((height, width)).astype(np.bool)
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
return mask_util.decode(rle).astype(np.bool)
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
The following usages are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
if len(masks):
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
else:
return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
However, we observe no difference in accuracy,
and BitMasks requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
output = output >= 0.5
return output
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, its bounding box will be all zero.
"""
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
return Boxes(boxes)
@staticmethod
def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
"""
Concatenates a list of BitMasks into a single BitMasks
Arguments:
bitmasks_list (list[BitMasks])
Returns:
BitMasks: the concatenated BitMasks
"""
assert isinstance(bitmasks_list, (list, tuple))
assert len(bitmasks_list) > 0
assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
return cat_bitmasks
class PolygonMasks:
"""
This class stores the segmentation masks for all objects in one image, in the form of polygons.
Attributes:
polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
"""
def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
"""<fim_suffix><fim_middle>
Arguments:
polygons (list[list[np.ndarray]]): The first
level of the list corresponds to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
"""
|
Arguments:
polygons (list[list[np.ndarray]]): The first
level of the list corresponds to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
"""
|
BLOCK_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
def instantiate(cfg):
"""<fim_suffix><fim_middle>
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
|
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
|
BLOCK_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
once this number is exceeded, the id is considered lost forever
min_box_rel_dim: a relative dimension (percentage); a bbox smaller than this
is removed from tracking
min_instance_period: an instance is only shown after this number of periods
since it first appears in the video
track_iou_threshold: IoU threshold; a bbox pair with IoU below this value is
removed from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of dicts, one per bbox pair, storing the fields described above
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""<fim_suffix><fim_middle>
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
|
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
|
BLOCK_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
once this number is exceeded, the id is considered lost forever
min_box_rel_dim: a relative dimension (percentage); a bbox smaller than this
is removed from tracking
min_instance_period: an instance is only shown after this number of periods
since it first appears in the video
track_iou_threshold: IoU threshold; a bbox pair with IoU below this value is
removed from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of dicts, one per bbox pair, storing the fields described above
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count<fim_suffix><fim_middle> += len(instances)
|
+= len(instances)
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>import math
import numpy as np
import cv2
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(np.bool)
segmentation = segmentation.astype(np.bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(segmentation)
# Intersection between all sets
inters<fim_suffix><fim_middle> = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
|
= np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
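# Illustrative sketch (standalone, numpy only): the Jaccard index that db_eval_iou above
# computes -- intersection over union of two binary masks, with void pixels excluded
# from both terms.
import numpy as np

annotation = np.array([[1, 1, 0, 0],
                       [1, 1, 0, 0]], dtype=bool)
segmentation = np.array([[1, 0, 0, 0],
                         [1, 1, 1, 0]], dtype=bool)
void = np.array([[0, 0, 0, 1],
                 [0, 0, 0, 1]], dtype=bool)

valid = np.logical_not(void)
inters = np.sum(segmentation & annotation & valid)
union = np.sum((segmentation | annotation) & valid)
print(inters / union)  # 3 / 5 = 0.6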
<filename>UniRef/detectron2/layers/losses.py<fim_prefix>import math
import torch
def diou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
Distance Intersection over Union Loss (Zhaohui Zheng et. al)
https://arxiv.org/abs/1911.08287
Args:
boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,).
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
eps (float): small number to prevent division by zero
"""
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
# TODO: use torch._assert_async() when pytorch 1.8 support is dropped
assert (x2 >= x1).all(), "bad box: x1 larger than x2"
assert (y2 >= y1).all(), "bad box: y1 larger than y2"
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsct = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps
iou = intsct / union
# smallest enclosing box
xc1 = torch.min(x1, x1g)
yc1 = torch.min(y1, y1g)
xc2 = torch.max(x2, x2g)
yc2 = torch.max(y2, y2g)
diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps
# centers of boxes
x_p = (x2 + x1) / 2
y_p = (y2 + y1) / 2
x_g = (x1g + x2g) / 2
y_g = (y1g + y2g) / 2
distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2)
# Eqn. (7)
loss = 1 - iou + (distance / diag_len)
if reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
elif reduction == "sum":
loss = loss.sum()
return loss
def ciou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
Complete Intersection over Union Loss (Zhaohui Zheng et. al)
https://arxiv.org/abs/1911.08287
Args:
boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,).
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
eps (float): small number to prevent division by zero
"""
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
# TODO: use torch._assert_async() when pytorch 1.8 support is dropped
assert (x2 >= x1).all(), "bad box: x1 larger than x2"
assert (y2 >= y1).all(), "bad box: y1 larger than y2"
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsct = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
intsct[mask]<fim_suffix><fim_middle> = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
|
= (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
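# Illustrative sketch (standalone, plain Python; diou_loss_np is a made-up name, not the
# repo function): the DIoU penalty used in diou_loss above -- loss = 1 - IoU + d^2 / c^2,
# where d is the distance between the box centers and c the diagonal of the smallest
# enclosing box (Eqn. (7) of arXiv:1911.08287).
def diou_loss_np(b1, b2, eps=1e-7):
    x1, y1, x2, y2 = b1
    x1g, y1g, x2g, y2g = b2
    iw = max(0.0, min(x2, x2g) - max(x1, x1g))
    ih = max(0.0, min(y2, y2g) - max(y1, y1g))
    inter = iw * ih
    union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - inter + eps
    iou = inter / union
    cx, cy = (x1 + x2) / 2, (y1 + y2) / 2
    cxg, cyg = (x1g + x2g) / 2, (y1g + y2g) / 2
    d2 = (cx - cxg) ** 2 + (cy - cyg) ** 2
    c2 = (max(x2, x2g) - min(x1, x1g)) ** 2 + (max(y2, y2g) - min(y1, y1g)) ** 2 + eps
    return 1 - iou + d2 / c2

print(diou_loss_np((0, 0, 2, 2), (0, 0, 2, 2)))  # ~0.0: identical boxes
print(diou_loss_np((0, 0, 2, 2), (4, 0, 6, 2)))  # 1.4: disjoint boxes are still ranked by center distance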
<filename>UniRef/detectron2/layers/losses.py<fim_prefix>import math
import torch
def diou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
Distance Intersection over Union Loss (Zhaohui Zheng et. al)
https://arxiv.org/abs/1911.08287
Args:
boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,).
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
eps (float): small number to prevent division by zero
"""
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
# TODO: use torch._assert_async() when pytorch 1.8 support is dropped
assert (x2 >= x1).all(), "bad box: x1 larger than x2"
assert (y2 >= y1).all(), "bad box: y1 larger than y2"
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsct = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps
iou = intsct / union
# smallest enclosing box
xc1 = torch.min(x1, x1g)
yc1 = torch.min(y1, y1g)
xc2 = torch.max(x2, x2g)
yc2 = torch.max(y2, y2g)
diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps
# centers of boxes
x_p = (x2 + x1) / 2
y_p = (y2 + y1) / 2
x_g = (x1g + x2g) / 2
y_g = (y1g + y2g) / 2
distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2)
# Eqn. (7)
loss = 1 - iou + (distance / diag_len)
if reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
elif reduction == "sum":
loss = loss.sum()
return loss
def ciou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
Complete Intersection over Union Loss (Zhaohui Zheng et. al)
https://arxiv.org/abs/1911.08287
Args:
boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,).
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
eps (float): small number to prevent division by zero
"""
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
# TODO: use torch._assert_async() when pytorch 1.8 support is dropped
assert (x2 >= x1).all(), "bad box: x1 larger than x2"
assert (y2 >= y1).all(), "bad box: y1 larger than y2"
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsct = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps
iou = intsct / union
# smallest enclosing box
xc1<fim_suffix><fim_middle> = torch.min(x1, x1g)
|
= torch.min(x1, x1g)
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
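# Illustrative sketch (standalone, plain Python; follows Zheng et al., arXiv:1911.08287,
# and is not claimed to be the exact continuation of the repo's ciou_loss above): CIoU
# adds an aspect-ratio consistency term v, weighted by alpha, on top of the DIoU penalty.
import math

def ciou_extra_terms(w, h, wg, hg, iou, eps=1e-7):
    # v measures how different the two aspect ratios are; alpha grows as IoU improves
    v = (4 / math.pi ** 2) * (math.atan(wg / hg) - math.atan(w / h)) ** 2
    alpha = v / ((1 - iou) + v + eps)
    return v, alpha

v, alpha = ciou_extra_terms(w=2, h=1, wg=1, hg=1, iou=0.5)
print(round(v, 3), round(alpha, 3))  # 0.042 0.077
# full loss: 1 - iou + distance / diag_len + alpha * v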
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
import numpy as np
import torch
from detectron2.structures import Boxes, Instances
from .base_tracker import BaseTracker
from scipy.optimize import linear_sum_assignment
from ..config.config import CfgNode as CfgNode_
from typing import Dict
from detectron2.config import configurable
class BaseHungarianTracker(BaseTracker):
"""
A base class for all Hungarian trackers
"""
@configurable
def __init__(
self,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
**kwargs
):
"""
Args:
            video_height: height of the video frame
            video_width: width of the video frame
            max_num_instances: maximum number of ids allowed to be tracked
            max_lost_frame_count: maximum number of frames an id can lose tracking;
                                  once this number is exceeded, the id is considered
                                  lost forever
            min_box_rel_dim: a fraction of the frame size; a bbox smaller than this
                             relative dimension is removed from tracking
            min_instance_period: an instance is only shown after it has been tracked
                                 for this number of frames since it first appears
                                 in the video
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
@classmethod
def from_config(cls, cfg: CfgNode_) -> Dict:
raise NotImplementedError("Calling HungarianTracker::from_config")
def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray:
raise NotImplementedError("Calling HungarianTracker::build_matrix")
def update(self, instances: Instances) -> Instances:
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
self._untracked_prev_idx = set(range(len(self._prev_instances)))
cost_matrix = self.build_cost_matrix(instances, self._prev_instances)
matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix)
instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx)
instances = self._process_unmatched_idx(instances, matched_idx)
instances = self._process_unmatched_prev_idx(instances, matched_prev_idx)
self._prev_instances = copy.deepcopy(instances)
return instances
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _process_matched_idx(
self,
instances: Instances,
matched_idx: np.ndarray,
matched_prev_idx: np.ndarray
) -> Instances:
assert matched_idx.size == matched_prev_idx.size
for i in range(matched_idx.size):
instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]]
instances.ID_period[matched_idx[i]] = \
self._prev_instances.ID_period[matched_prev_idx[i]] + 1
instances.lost_frame_count[matched_idx[i]] = 0
return instances
def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances:
untracked_idx = set(range(len(instances))).difference(set(matched_idx))
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _process_unmatched_prev_idx(
self,
instances: Instances,
matched_prev_idx:
np.ndarray
) -> Instances:
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx))
for idx in untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes<fim_suffix><fim_middle> = torch.IntTensor(untracked_instances.pred_classes)
|
= torch.IntTensor(untracked_instances.pred_classes)
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
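# Illustrative sketch (assumes numpy and scipy): the globally optimal matching that
# BaseHungarianTracker.update delegates to scipy.optimize.linear_sum_assignment; here
# the cost of assigning a current box to a previous box is taken as 1 - IoU.
import numpy as np
from scipy.optimize import linear_sum_assignment

iou = np.array([[0.8, 0.1, 0.0],
                [0.2, 0.6, 0.1]])        # rows: current boxes, cols: previous boxes
cost = 1.0 - iou
rows, cols = linear_sum_assignment(cost)  # minimizes the total assignment cost
for r, c in zip(rows, cols):
    print(f"current {r} -> previous {c} (IoU={iou[r, c]:.2f})")
# current 0 -> previous 0 (IoU=0.80)
# current 1 -> previous 1 (IoU=0.60)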
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Uniform both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note the current matching is not symmetric.
# It assumes model_state_dict will have longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
    Match names between the two state dicts, and return a new ckpt_state_dict with names
    converted to match model_state_dict using heuristics. The returned dict can later be
loaded with fvcore checkpointer.
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
model and will be renamed at first.
Strategy: suppose that the models that we will create will have prefixes appended
    to each of their keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, we look for each model weight, look among all loaded keys if there is one
that is a suffix of the current weight name, and use it if that's the case.
    If multiple matches exist, take the one with the longest
    matching name. For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
# get a matrix of string matches, where each (i, j) entry correspond to the size of the
# ckpt_key string, if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
logger = logging.getLogger(__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
result_state_dict = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
# logging:
matched_model_keys = sorted(matched_keys.values())
if len(matched_model_keys) == 0:
logger.warning("No weights in checkpoint matched with model.")
return ckpt_state_dict
common_prefix = _longest_common_prefix(matched_model_keys)
rev_matched_keys = {v: k for k, v in matched_keys.items()}
original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
table = []
memo = set()
for key_model in matched_model_keys:
if key_model in memo:
continue
if key_model in model_key_groups:
group = model_key_groups[key_model]
memo |= set(group)
shapes = [tuple(model_state_dict[k].shape) for k in group]
table.append(
(
_longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
_group_str([original_keys[k] for k in group]),
" ".join([str(x).replace(" ", "") for x in shapes]),
)
)
else:
key_checkpoint = original_keys[key_model]
shape = str(tuple(model_state_dict[key_model].shape))
table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
table_str = tabulate(
table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"]
)
logger.info(
"Following weights matched with "
+ (f"submodule {common_prefix[:-1]}" if common_prefix else "model")
+ ":\n"
+ table_str
)
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
for k in unmatched_ckpt_keys:
result_state_dict[k] = ckpt_state_dict[k]
return result_state_dict
def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
"""
Params in the same submodule are grouped together.
Args:
keys: names of all parameters
original_names: mapping from parameter name to their name in the checkpoint
Returns:
dict[name -> all other names in the same group]
"""
def _submodule_name(key):
pos = key.rfind(".")
if pos < 0:
return None
prefix = key[: pos + 1]
return prefix
all_submodules = [_submodule_name(k) for k in keys]
all_submodules = [x for x in all_submodules if x]
all_submodules = sorted(all_submodules, key=len)
ret = {}
for prefix in all_submodules:
group = [k for k in keys if k.startswith(prefix)]
if len(group) <= 1:
continue
original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
if len(original_name_lcp) == 0:
# don't group weights if original names don't share prefix
continue
for k in group:
if k in ret:
continue
ret[k] = group
return ret
def _longest_common_prefix(names: List[str]) -> str:
"""
["abc.zfg", "abc.zef"] -> "abc."
"""
names = [n.split(".") for n in names]
m1,<fim_suffix><fim_middle> m2 = min(names), max(names)
|
m2 = min(names), max(names)
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
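# Illustrative sketch (standalone, plain Python): the suffix-matching heuristic described
# in align_and_update_state_dicts above -- a checkpoint key matches a model key when it is
# the whole name or a '.'-delimited suffix of it, and the longest matching name wins.
def match(model_key, ckpt_key):
    return model_key == ckpt_key or model_key.endswith("." + ckpt_key)

model_keys = ["backbone.body.res2.conv1.weight", "backbone.body.conv1.weight"]
ckpt_keys = ["res2.conv1.weight", "conv1.weight"]

for mk in model_keys:
    candidates = [ck for ck in ckpt_keys if match(mk, ck)]
    best = max(candidates, key=len)  # longest checkpoint name wins when several match
    print(f"{mk}  <-  {best}")
# backbone.body.res2.conv1.weight  <-  res2.conv1.weight
# backbone.body.conv1.weight  <-  conv1.weight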
<filename>UniRef/detectron2/structures/masks.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
return np.zeros((height, width)).astype(np.bool)
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
return<fim_suffix><fim_middle> mask_util.decode(rle).astype(np.bool)
|
mask_util.decode(rle).astype(np.bool)
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
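# Illustrative sketch (standalone, numpy only): the shoelace formula used by polygon_area
# above, checked on a unit square and on a 2x3 axis-aligned rectangle.
import numpy as np

def shoelace_area(x, y):
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

print(shoelace_area(np.array([0, 1, 1, 0]), np.array([0, 0, 1, 1])))  # 1.0
print(shoelace_area(np.array([0, 2, 2, 0]), np.array([0, 0, 3, 3])))  # 6.0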
<filename>UniRef/detectron2/structures/masks.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
return np.zeros((height, width)).astype(np.bool)
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
return mask_util.decode(rle).astype(np.bool)
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
The following usage are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
if len(masks):
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
else:
return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
However we observe no difference in accuracy,
but BitMasks requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
output = output >= 0.5
return output
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around bitmasks.
            If a mask is empty, its bounding box will be all zeros.
"""
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
return Boxes(boxes)
@staticmethod
def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
"""
Concatenates a list of BitMasks into a single BitMasks
Arguments:
bitmasks_list (list[BitMasks])
Returns:
BitMasks: the concatenated BitMasks
"""
assert isinstance(bitmasks_list, (list, tuple))
assert len(bitmasks_list) > 0
assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
return cat_bitmasks
class PolygonMasks:
"""
This class stores the segmentation masks for all objects in one image, in the form of polygons.
Attributes:
polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
"""
def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
"""
Arguments:
polygons (list[list[np.ndarray]]): The first
level of the list correspond to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
"""
if not isinstance(polygons, list):
raise ValueError(
"Cannot create PolygonMasks: Expect a list of list of polygons per image. "
"Got '{}' instead.".format(type(polygons))
)
def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
# Use float64 for higher precision, because why not?
# Always put polygons on CPU (self.to is a no-op) since they
# are supposed to be small tensors.
# May need to change this assumption if GPU placement becomes useful
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64")
def process_polygons(
polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
) -> List[np.ndarray]:
if not isinstance(polygons_per_instance, list):
raise ValueError(
"Cannot create polygons: Expect a list of polygons per instance. "
"Got '{}' instead.".format(type(polygons_per_instance))
)
# transform each polygon to a numpy array
polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
for polygon in polygons_per_instance:
if len(polygon) % 2 != 0 or len(polygon) < 6:
raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
return polygons_per_instance
self.polygons: List[List[np.ndarray]] = [
process_polygons(polygons_per_instance) for polygons_per_instance in polygons
]
def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
return self
@property
def device(self) -> torch.device:
return torch.device("cpu")
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around polygon masks.
"""
boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
for idx, polygons_per_instance in enumerate(self.polygons):
minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
maxxy = torch.zeros(2, dtype=torch.float32)
for polygon in polygons_per_instance:
coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
minxy = torch.min(minxy, torch.min(coords, dim=0).values)
maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
boxes[idx, :2] = minxy
boxes[idx,<fim_suffix><fim_middle> 2:] = maxxy
|
2:] = maxxy
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
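# Illustrative sketch (standalone, numpy only): the tight-bounding-box computation that
# BitMasks.get_bounding_boxes and PolygonMasks.get_bounding_boxes perform, shown for a
# single boolean mask -- project the mask onto each axis and take the first/last hit.
import numpy as np

mask = np.zeros((6, 8), dtype=bool)
mask[2:5, 3:6] = True                          # a 3x3 blob of foreground pixels

ys = np.where(mask.any(axis=1))[0]             # rows that contain foreground
xs = np.where(mask.any(axis=0))[0]             # columns that contain foreground
if len(xs) > 0 and len(ys) > 0:
    box = (xs[0], ys[0], xs[-1] + 1, ys[-1] + 1)
else:
    box = (0, 0, 0, 0)                         # empty mask -> all-zero box, as in BitMasks
print(box)                                     # (3, 2, 6, 5)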
<filename>UniRef/detectron2/layers/losses.py<fim_prefix>import math
import torch
def diou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
Distance Intersection over Union Loss (Zhaohui Zheng et. al)
https://arxiv.org/abs/1911.08287
Args:
boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,).
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
eps (float): small number to prevent division by zero
"""
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
# TODO: use torch._assert_async() when pytorch 1.8 support is dropped
assert (x2 >= x1).all(), "bad box: x1 larger than x2"
assert (y2 >= y1).all(), "bad box: y1 larger than y2"
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsct = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps
iou = intsct / union
# smallest enclosing box
xc1 = torch.min(x1, x1g)
yc1 = torch.min(y1, y1g)
xc2 = torch.max(x2, x2g)
yc2 = torch.max(y2, y2g)
diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps
# centers of boxes
x_p = (x2 + x1) / 2
y_p = (y2 + y1) / 2
x_g<fim_suffix><fim_middle> = (x1g + x2g) / 2
|
= (x1g + x2g) / 2
|
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
import numpy as np
import torch
from detectron2.structures import Boxes, Instances
from .base_tracker import BaseTracker
from scipy.optimize import linear_sum_assignment
from ..config.config import CfgNode as CfgNode_
from typing import Dict
from detectron2.config import configurable
class BaseHungarianTracker(BaseTracker):
"""
A base class for all Hungarian trackers
"""
@configurable
def __init__(
self,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
**kwargs
):
"""
Args:
            video_height: height of the video frame
            video_width: width of the video frame
            max_num_instances: maximum number of ids allowed to be tracked
            max_lost_frame_count: maximum number of frames an id can lose tracking;
                                  once this number is exceeded, the id is considered
                                  lost forever
            min_box_rel_dim: a fraction of the frame size; a bbox smaller than this
                             relative dimension is removed from tracking
            min_instance_period: an instance is only shown after it has been tracked
                                 for this number of frames since it first appears
                                 in the video
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
@classmethod
def from_config(cls, cfg: CfgNode_) -> Dict:
raise NotImplementedError("Calling HungarianTracker::from_config")
def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray:
raise NotImplementedError("Calling HungarianTracker::build_matrix")
def update(self, instances: Instances) -> Instances:
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
self._untracked_prev_idx = set(range(len(self._prev_instances)))
cost_matrix = self.build_cost_matrix(instances, self._prev_instances)
matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix)
instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx)
instances = self._process_unmatched_idx(instances, matched_idx)
instances = self._process_unmatched_prev_idx(instances, matched_prev_idx)
self._prev_instances = copy.deepcopy(instances)
return instances
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _process_matched_idx(
self,
instances: Instances,
matched_idx: np.ndarray,
matched_prev_idx: np.ndarray
) -> Instances:
assert matched_idx.size == matched_prev_idx.size
for i in range(matched_idx.size):
instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]]
instances.ID_period[matched_idx[i]] = \
self._prev_instances.ID_period[matched_prev_idx[i]] + 1
instances.lost_frame_count[matched_idx[i]] = 0
return instances
def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances:
untracked_idx = set(range(len(instances))).difference(set(matched_idx))
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _process_unmatched_prev_idx(
self,
instances: Instances,
matched_prev_idx:
np.ndarray
) -> Instances:
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx))
for idx in untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])<fim_suffix><fim_middle>
| null |
STATEMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
import numpy as np
import torch
from detectron2.structures import Boxes, Instances
from .base_tracker import BaseTracker
from scipy.optimize import linear_sum_assignment
from ..config.config import CfgNode as CfgNode_
from typing import Dict
from detectron2.config import configurable
class BaseHungarianTracker(BaseTracker):
"""
A base class for all Hungarian trackers
"""
@configurable
def __init__(
self,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
**kwargs
):
"""
Args:
            video_height: height of the video frame
            video_width: width of the video frame
            max_num_instances: maximum number of ids allowed to be tracked
            max_lost_frame_count: maximum number of frames an id can lose tracking;
                                  once this number is exceeded, the id is considered
                                  lost forever
            min_box_rel_dim: a fraction of the frame size; a bbox smaller than this
                             relative dimension is removed from tracking
            min_instance_period: an instance is only shown after it has been tracked
                                 for this number of frames since it first appears
                                 in the video
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
@classmethod
def from_config(cls, cfg: CfgNode_) -> Dict:
raise NotImplementedError("Calling HungarianTracker::from_config")
def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray:
raise NotImplementedError("Calling HungarianTracker::build_matrix")
def update(self, instances: Instances) -> Instances:
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
self._untracked_prev_idx = set(range(len(self._prev_instances)))
cost_matrix = self.build_cost_matrix(instances, self._prev_instances)
matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix)
instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx)
instances = self._process_unmatched_idx(instances, matched_idx)
instances = self._process_unmatched_prev_idx(instances, matched_prev_idx)
self._prev_instances = copy.deepcopy(instances)
return instances
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if<fim_suffix><fim_middle> not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
|
not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
|
IF
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>import math
import numpy as np
import cv2
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(np.bool)
segmentation = segmentation.astype(np.bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(segmentation)
# Intersection between all sets
inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1))
j = inters / union
if j.ndim == 0:
j = 1 if np.isclose(union, 0) else j
else:
j[np.isclose(union, 0)] = 1
return j
def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008):
assert annotation.shape == segmentation.shape
if void_pixels is not None:
assert annotation.shape == void_pixels.shape
if annotation.ndim == 3:
n_frames = annotation.shape[0]
f_res = np.zeros(n_frames)
for frame_id in range(n_frames):
void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ]
f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th)
elif annotation.ndim == 2:
f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th)
else:
raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions')
return f_res
def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008):
"""
    Compute mean, recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
void_pixels (ndarray): optional mask with void pixels
Returns:
F (float): boundaries F-measure
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
if void_pixels is not None:
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(foreground_mask).astype(np.bool)
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
# Get the pixel boundaries of both masks
fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels))
gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels))
from skimage.morphology import disk
# fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
if<fim_suffix><fim_middle> precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
|
precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
|
IF
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
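# Illustrative sketch (standalone, plain Python): the precision/recall combination at the
# end of f_measure above, including the degenerate cases where one or both boundary maps
# are empty.
def boundary_f(n_fg, n_gt, n_fg_match, n_gt_match):
    if n_fg == 0 and n_gt > 0:
        precision, recall = 1.0, 0.0
    elif n_fg > 0 and n_gt == 0:
        precision, recall = 0.0, 1.0
    elif n_fg == 0 and n_gt == 0:
        precision, recall = 1.0, 1.0
    else:
        precision = n_fg_match / float(n_fg)
        recall = n_gt_match / float(n_gt)
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)

print(round(boundary_f(n_fg=10, n_gt=8, n_fg_match=6, n_gt_match=6), 3))  # 0.667
print(boundary_f(n_fg=0, n_gt=0, n_fg_match=0, n_gt_match=0))             # 1.0 (both empty)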
<filename>UniRef/detectron2/config/config.py<fim_prefix># -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import inspect
import logging
from fvcore.common.config import CfgNode as _CfgNode
from detectron2.utils.file_io import PathManager
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
.. automethod:: clone
.. automethod:: freeze
.. automethod:: defrost
.. automethod:: is_frozen
.. automethod:: load_yaml_with_base
.. automethod:: merge_from_list
.. automethod:: merge_from_other_cfg
"""
@classmethod
def _open_cfg(cls, filename):
return PathManager.open(filename, "r")
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
"""
Load content from the given config file and merge it into self.
Args:
cfg_filename: config filename
allow_unsafe: allow unsafe yaml syntax
"""
assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
def dump(self, *args, **kwargs):
"""
Returns:
str: a yaml string representation of the config
"""
# to make it show up in docs
return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
from .defaults import _C
return _C.clone()
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY", after calling
`set_global_cfg(cfg)`, the key can be accessed by:
::
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
def configurable(init_func=None, *, from_config=None):
"""
Decorate a function or a class's __init__ method so that it can be called
with a :class:`CfgNode` object using a :func:`from_config` function that translates
:class:`CfgNode` to arguments.
Examples:
::
# Usage 1: Decorator on __init__:
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg): # 'cfg' must be the first argument
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
# Usage 2: Decorator on any function. Needs an extra from_config argument:
@configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
def a_func(a, b=2, c=3):
pass
a1 = a_func(a=1, b=2) # regular call
a2 = a_func(cfg) # call with a cfg
a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
Args:
init_func (callable): a class's ``__init__`` method in usage 1. The
class must have a ``from_config`` classmethod which takes `cfg` as
the first argument.
from_config (callable): the from_config function in usage 2. It must take `cfg`
as its first argument.
"""
if init_func is not None:
assert (
inspect.isfunction(init_func)
and from_config is None
and init_func.__name__ == "__init__"
), "Incorrect use of @configurable. Check API documentation for examples."
@functools.wraps(init_func)
def wrapped(self, *args, **kwargs):
try:
from_config_func = type(self).from_config
except AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs)
return wrapped
else:
if from_config is None:
return configurable # @configurable() is made equivalent to @configurable
assert inspect.isfunction(
from_config
), "from_config argument of configurable must be a function!"
def wrapper(orig_func):
@functools.wraps(orig_func)
def wrapped(*args, **kwargs):
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config, *args, **kwargs)
return orig_func(**explicit_args)
else:
return orig_func(*args, **kwargs)
wrapped.from_config = from_config
return wrapped
return wrapper
def _get_args_from_config(from_config_func, *args, **kwargs):
"""
Use `from_config` to obtain explicit arguments.
Returns:
dict: arguments to be used for cls.__init__
"""
signature = inspect.signature(from_config_func)
if list(signature.parameters.keys())[0] != "cfg":
if inspect.isfunction(from_config_func):
name = from_config_func.__name__
else:
name = f"{from_config_func.__self__}.from_config"
raise TypeError(f"{name} must take 'cfg' as the first argument!")
support_var_arg = any(
param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
for param in signature.parameters.values()
)
if<fim_suffix><fim_middle> support_var_arg: # forward all arguments to from_config, if from_config accepts them
ret = from_config_func(*args, **kwargs)
else:
# forward supported arguments to from_config
supported_arg_names = set(signature.parameters.keys())
extra_kwargs = {}
for name in list(kwargs.keys()):
if name not in supported_arg_names:
extra_kwargs[name] = kwargs.pop(name)
ret = from_config_func(*args, **kwargs)
# forward the other arguments to __init__
ret.update(extra_kwargs)
|
support_var_arg: # forward all arguments to from_config, if from_config accepts them
ret = from_config_func(*args, **kwargs)
else:
# forward supported arguments to from_config
supported_arg_names = set(signature.parameters.keys())
extra_kwargs = {}
for name in list(kwargs.keys()):
if name not in supported_arg_names:
extra_kwargs[name] = kwargs.pop(name)
ret = from_config_func(*args, **kwargs)
# forward the other arguments to __init__
ret.update(extra_kwargs)
|
IF
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
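A minimal usage sketch of the @configurable pattern completed above, assuming detectron2 is installed; the Toy class and the A/B config keys are illustrative assumptions, not detectron2 API.

from detectron2.config import CfgNode, configurable

class Toy:
    @configurable
    def __init__(self, a, b=2):
        self.a, self.b = a, b

    @classmethod
    def from_config(cls, cfg):  # 'cfg' must be the first argument
        return {"a": cfg.A, "b": cfg.B}

cfg = CfgNode({"A": 1, "B": 5})
t1 = Toy(a=1, b=2)   # regular construction
t2 = Toy(cfg)        # construction from a config (routed through from_config)
t3 = Toy(cfg, b=9)   # config plus an explicit override of 'b'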
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
once this number is exceeded, the id is considered lost
forever
min_box_rel_dim: a relative dimension (percentage); a bbox smaller than
this is removed from tracking
min_instance_period: an instance is only shown after it has been present
for this number of periods since it first appeared in the video
track_iou_threshold: IoU threshold; a bbox pair below this number is removed
from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of IoU for all pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if<fim_suffix><fim_middle> not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
|
not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
|
IF
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if<fim_suffix><fim_middle> isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
|
isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
|
IF
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
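A hedged sketch of the "_target_" convention that instantiate() resolves in the snippet above; the datetime.date target is a stand-in chosen for illustration, and omegaconf is assumed to be installed since instantiate imports it lazily.

from detectron2.config import instantiate

cfg = {"_target_": "datetime.date", "year": 2020, "month": 1, "day": 1}
d = instantiate(cfg)   # locates datetime.date and calls it with the remaining keys
print(d)               # 2020-01-01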
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
once this number is exceeded, the id is considered lost
forever
min_box_rel_dim: a relative dimension (percentage); a bbox smaller than
this is removed from tracking
min_instance_period: an instance is only shown after it has been present
for this number of periods since it first appeared in the video
track_iou_threshold: IoU threshold; a bbox pair below this number is removed
from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of IoU for all pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if<fim_suffix><fim_middle> not instances.has("ID"):
instances.set("ID", [None] * len(instances))
|
not instances.has("ID"):
instances.set("ID", [None] * len(instances))
|
IF
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/structures/instances.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import itertools
from typing import Any, Dict, List, Tuple, Union
import torch
class Instances:
"""
This class represents a list of instances in an image.
It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields".
All fields must have the same ``__len__`` which is the number of instances.
All other (non-field) attributes of this class are considered private:
they must start with '_' and are not modifiable by a user.
Some basic usage:
1. Set/get/check a field:
.. code-block:: python
instances.gt_boxes = Boxes(...)
print(instances.pred_masks) # a tensor of shape (N, H, W)
print('gt_masks' in instances)
2. ``len(instances)`` returns the number of instances
3. Indexing: ``instances[indices]`` will apply the indexing on all the fields
and returns a new :class:`Instances`.
Typically, ``indices`` is an integer vector of indices,
or a binary mask of length ``num_instances``
.. code-block:: python
category_3_detections = instances[instances.pred_classes == 3]
confident_detections = instances[instances.scores > 0.9]
"""
def __init__(self, image_size: Tuple[int, int], **kwargs: Any):
"""
Args:
image_size (height, width): the spatial size of the image.
kwargs: fields to add to this `Instances`.
"""
self._image_size = image_size
self._fields: Dict[str, Any] = {}
for k, v in kwargs.items():
self.set(k, v)
@property
def image_size(self) -> Tuple[int, int]:
"""
Returns:
tuple: height, width
"""
return self._image_size
def __setattr__(self, name: str, val: Any) -> None:
if name.startswith("_"):
super().__setattr__(name, val)
else:
self.set(name, val)
def __getattr__(self, name: str) -> Any:
if<fim_suffix><fim_middle> name == "_fields" or name not in self._fields:
raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
|
name == "_fields" or name not in self._fields:
raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
|
IF
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
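A small usage sketch of the Instances field API shown above, assuming detectron2 and torch are available; the field values are toy data.

import torch
from detectron2.structures import Boxes, Instances

inst = Instances((480, 640))   # image size is (height, width)
inst.pred_boxes = Boxes(torch.tensor([[0., 0., 10., 10.], [5., 5., 20., 20.]]))
inst.scores = torch.tensor([0.9, 0.4])
print(len(inst))               # 2 -- all fields must share this length
print("scores" in inst)        # True -- membership checks the fields
confident = inst[inst.scores > 0.5]   # boolean indexing filters every field at once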
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
once this number is exceeded, the id is considered lost
forever
min_box_rel_dim: a relative dimension (percentage); a bbox smaller than
this is removed from tracking
min_instance_period: an instance is only shown after it has been present
for this number of periods since it first appeared in the video
track_iou_threshold: IoU threshold; a bbox pair below this number is removed
from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of IoU for all pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if<fim_suffix><fim_middle> not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
|
not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
|
IF
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
once this number is exceeded, the id is considered lost
forever
min_box_rel_dim: a relative dimension (percentage); a bbox smaller than
this is removed from tracking
min_instance_period: an instance is only shown after it has been present
for this number of periods since it first appeared in the video
track_iou_threshold: IoU threshold; a bbox pair below this number is removed
from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of IoU for all pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if<fim_suffix><fim_middle> self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
|
self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
|
IF
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
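A minimal construction sketch for the tracker above, assuming direct keyword construction is acceptable (the frame size and threshold values are made up); per-frame detections would then be passed through update().

from detectron2.tracking.bbox_iou_tracker import BBoxIOUTracker

# assumed toy parameters, not defaults from any shipped config
tracker = BBoxIOUTracker(video_height=480, video_width=640, track_iou_threshold=0.5)
# inside the frame loop, with `instances` produced by a detector:
#     instances = tracker.update(instances)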
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
import numpy as np
import torch
from detectron2.structures import Boxes, Instances
from .base_tracker import BaseTracker
from scipy.optimize import linear_sum_assignment
from ..config.config import CfgNode as CfgNode_
from typing import Dict
from detectron2.config import configurable
class BaseHungarianTracker(BaseTracker):
"""
A base class for all Hungarian trackers
"""
@configurable
def __init__(
self,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
once this number is exceeded, the id is considered lost
forever
min_box_rel_dim: a relative dimension (percentage); a bbox smaller than
this is removed from tracking
min_instance_period: an instance is only shown after it has been present
for this number of periods since it first appeared in the video
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
@classmethod
def from_config(cls, cfg: CfgNode_) -> Dict:
raise NotImplementedError("Calling HungarianTracker::from_config")
def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray:
raise NotImplementedError("Calling HungarianTracker::build_matrix")
def update(self, instances: Instances) -> Instances:
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
self._untracked_prev_idx = set(range(len(self._prev_instances)))
cost_matrix = self.build_cost_matrix(instances, self._prev_instances)
matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix)
instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx)
instances = self._process_unmatched_idx(instances, matched_idx)
instances = self._process_unmatched_prev_idx(instances, matched_prev_idx)
self._prev_instances = copy.deepcopy(instances)
return instances
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _process_matched_idx(
self,
instances: Instances,
matched_idx: np.ndarray,
matched_prev_idx: np.ndarray
) -> Instances:
assert matched_idx.size == matched_prev_idx.size
for i in range(matched_idx.size):
instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]]
instances.ID_period[matched_idx[i]] = \
self._prev_instances.ID_period[matched_prev_idx[i]] + 1
instances.lost_frame_count[matched_idx[i]] = 0
return instances
def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances:
untracked_idx = set(range(len(instances))).difference(set(matched_idx))
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _process_unmatched_prev_idx(
self,
instances: Instances,
matched_prev_idx:
np.ndarray
) -> Instances:
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx))
for idx in untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if<fim_suffix><fim_middle> instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
|
instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
|
IF
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
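A hedged sketch of the Hungarian matching step used by the tracker above: scipy's linear_sum_assignment minimizes total cost over a (current x previous) matrix; turning IoU into cost via 1 - IoU is an assumed convention for illustration.

import numpy as np
from scipy.optimize import linear_sum_assignment

iou = np.array([[0.8, 0.1],
                [0.2, 0.6],
                [0.0, 0.3]])   # 3 current boxes vs. 2 previous boxes
cost = 1.0 - iou               # higher IoU -> lower matching cost
rows, cols = linear_sum_assignment(cost)
# pairs current index 0 with previous 0, and current 1 with previous 1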
<filename>UniRef/detectron2/structures/boxes.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import math
import numpy as np
from enum import IntEnum, unique
from typing import List, Tuple, Union
import torch
from torch import device
_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray]
@unique
class BoxMode(IntEnum):
"""
Enum of different ways to represent a box.
"""
XYXY_ABS = 0
"""
(x0, y0, x1, y1) in absolute floating points coordinates.
The coordinates in range [0, width or height].
"""
XYWH_ABS = 1
"""
(x0, y0, w, h) in absolute floating points coordinates.
"""
XYXY_REL = 2
"""
Not yet supported!
(x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.
"""
XYWH_REL = 3
"""
Not yet supported!
(x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.
"""
XYWHA_ABS = 4
"""
(xc, yc, w, h, a) in absolute floating points coordinates.
(xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.
"""
@staticmethod
def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType:
"""
Args:
box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5
from_mode, to_mode (BoxMode)
Returns:
The converted box of the same type.
"""
if from_mode == to_mode:
return box
original_type = type(box)
is_numpy = isinstance(box, np.ndarray)
single_box = isinstance(box, (list, tuple))
if single_box:
assert len(box) == 4 or len(box) == 5, (
"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,"
" where k == 4 or 5"
)
arr = torch.tensor(box)[None, :]
else:
# avoid modifying the input box
if is_numpy:
arr = torch.from_numpy(np.asarray(box)).clone()
else:
arr = box.clone()
assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [
BoxMode.XYXY_REL,
BoxMode.XYWH_REL,
], "Relative mode not yet supported!"
if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:
assert (
arr.shape[-1] == 5
), "The last dimension of input shape must be 5 for XYWHA format"
original_dtype = arr.dtype
arr = arr.double()
w = arr[:, 2]
h = arr[:, 3]
a = arr[:, 4]
c = torch.abs(torch.cos(a * math.pi / 180.0))
s = torch.abs(torch.sin(a * math.pi / 180.0))
# This basically computes the horizontal bounding rectangle of the rotated box
new_w = c * w + s * h
new_h = c * h + s * w
# convert center to top-left corner
arr[:, 0] -= new_w / 2.0
arr[:, 1] -= new_h / 2.0
# bottom-right corner
arr[:, 2] = arr[:, 0] + new_w
arr[:, 3] = arr[:, 1] + new_h
arr = arr[:, :4].to(dtype=original_dtype)
elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:
original_dtype = arr.dtype
arr = arr.double()
arr[:, 0] += arr[:, 2] / 2.0
arr[:, 1] += arr[:, 3] / 2.0
angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)
arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)
else:
if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:
arr[:, 2] += arr[:, 0]
arr[:, 3] += arr[:, 1]
elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:
arr[:, 2] -= arr[:, 0]
arr[:, 3] -= arr[:, 1]
else:
raise NotImplementedError(
"Conversion from BoxMode {} to {} is not supported yet".format(
from_mode, to_mode
)
)
if single_box:
return original_type(arr.flatten().tolist())
if is_numpy:
return arr.numpy()
else:
return arr
class Boxes:
"""
This structure stores a list of boxes as a Nx4 torch.Tensor.
It supports some common methods about boxes
(`area`, `clip`, `nonempty`, etc),
and also behaves like a Tensor
(support indexing, `to(device)`, `.device`, and iteration over all boxes)
Attributes:
tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
# Use reshape, so we don't end up creating a new tensor that does not depend on
# the inputs (and consequently confuses jit)
tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)
assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()
self.tensor = tensor
def clone(self) -> "Boxes":
"""
Clone the Boxes.
Returns:
Boxes
"""
return Boxes(self.tensor.clone())
def to(self, device: torch.device):
# Boxes are assumed to be float32 and do not support to(dtype)
return Boxes(self.tensor.to(device=device))
def area(self) -> torch.Tensor:
"""
Computes the area of all the boxes.
Returns:
torch.Tensor: a vector with areas of each box.
"""
box = self.tensor
area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])
return area
def clip(self, box_size: Tuple[int, int]) -> None:
"""
Clip (in place) the boxes by limiting x coordinates to the range [0, width]
and y coordinates to the range [0, height].
Args:
box_size (height, width): The clipping box's size.
"""
assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!"
h, w = box_size
x1 = self.tensor[:, 0].clamp(min=0, max=w)
y1 = self.tensor[:, 1].clamp(min=0, max=h)
x2 = self.tensor[:, 2].clamp(min=0, max=w)
y2 = self.tensor[:, 3].clamp(min=0, max=h)
self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)
def nonempty(self, threshold: float = 0.0) -> torch.Tensor:
"""
Find boxes that are non-empty.
A box is considered empty if either of its sides is no larger than the threshold.
Returns:
Tensor:
a binary vector which represents whether each box is empty
(False) or non-empty (True).
"""
box = self.tensor
widths = box[:, 2] - box[:, 0]
heights = box[:, 3] - box[:, 1]
keep = (widths > threshold) & (heights > threshold)
return keep
def __getitem__(self, item) -> "Boxes":
"""
Args:
item: int, slice, or a BoolTensor
Returns:
Boxes: Create a new :class:`Boxes` by indexing.
The following usage are allowed:
1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
Note that the returned Boxes might share storage with this Boxes,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return Boxes(self.tensor[item].view(1, -1))
b = self.tensor[item]
assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item)
return Boxes(b)
def __len__(self) -> int:
return self.tensor.shape[0]
def __repr__(self) -> str:
return "Boxes(" + str(self.tensor) + ")"
def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:
"""
Args:
box_size (height, width): Size of the reference box.
boundary_threshold (int): Boxes that extend beyond the reference box
boundary by more than boundary_threshold are considered "outside".
Returns:
a binary vector, indicating whether each box is inside the reference box.
"""
height, width = box_size
inds_inside = (
(self.tensor[..., 0] >= -boundary_threshold)
& (self.tensor[..., 1] >= -boundary_threshold)
& (self.tensor[..., 2] < width + boundary_threshold)
& (self.tensor[..., 3] < height + boundary_threshold)
)
return inds_inside
def get_centers(self) -> torch.Tensor:
"""
Returns:
The box centers in a Nx2 array of (x, y).
"""
return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2
def scale(self, scale_x: float, scale_y: float) -> None:
"""
Scale the box with horizontal and vertical scaling factors
"""
self.tensor[:, 0::2] *= scale_x
self.tensor[:, 1::2] *= scale_y
@classmethod
def cat(cls, boxes_list: List["Boxes"]) -> "Boxes":
"""
Concatenates a list of Boxes into a single Boxes
Arguments:
boxes_list (list[Boxes])
Returns:
Boxes: the concatenated Boxes
"""
assert isinstance(boxes_list, (list, tuple))
if len(boxes_list) == 0:
return cls(torch.empty(0))
assert all([isinstance(box, Boxes) for box in boxes_list])
# use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input
cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))
return cat_boxes
@property
def device(self) -> device:
return self.tensor.device
# type "Iterator[torch.Tensor]", yield, and iter() not supported by torchscript
# https://github.com/pytorch/pytorch/issues/18627
@torch.jit.unused
def __iter__(self):
"""
Yield a box as a Tensor of shape (4,) at a time.
"""
yield from self.tensor
def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Given two lists of boxes of size N and M,
compute the intersection area between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax)
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: intersection, sized [N,M].
"""
boxes1, boxes2 = boxes1.tensor, boxes2.tensor
width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(
boxes1[:, None, :2], boxes2[:, :2]
) # [N,M,2]
width_height.clamp_(min=0) # [N,M,2]
intersection = width_height.prod(dim=2) # [N,M]
return intersection
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Given two lists of boxes of size N and M, compute the IoU
(intersection over union) between **all** N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
area1 = boxes1.area() # [N]
area2 = boxes2.area() #<fim_suffix><fim_middle> [M]
|
[M]
|
LINE_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
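To complete the picture started by the area computation above, a hedged sketch of pairwise_iou on two toy boxes (detectron2 and torch assumed installed): iou[i, j] = inter[i, j] / (area1[i] + area2[j] - inter[i, j]).

import torch
from detectron2.structures import Boxes, pairwise_iou

b1 = Boxes(torch.tensor([[0., 0., 10., 10.]]))
b2 = Boxes(torch.tensor([[5., 5., 15., 15.]]))
print(pairwise_iou(b1, b2))   # intersection 25, union 100 + 100 - 25 = 175 -> ~0.143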
<filename>UniRef/detectron2/structures/boxes.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import math
import numpy as np
from enum import IntEnum, unique
from typing import List, Tuple, Union
import torch
from torch import device
_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray]
@unique
class BoxMode(IntEnum):
"""
Enum of different ways to represent a box.
"""
XYXY_ABS = 0
"""
(x0, y0, x1, y1) in absolute floating points coordinates.
The coordinates in range [0, width or height].
"""
XYWH_ABS = 1
"""
(x0, y0, w, h) in absolute floating points coordinates.
"""
XYXY_REL = 2
"""
Not yet supported!
(x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.
"""
XYWH_REL = 3
"""
Not yet supported!
(x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.
"""
XYWHA_ABS = 4
"""
(xc, yc, w, h, a) in absolute floating points coordinates.
(xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.
"""
@staticmethod
def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType:
"""
Args:
box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5
from_mode, to_mode (BoxMode)
Returns:
The converted box of the same type.
"""
if from_mode == to_mode:
return box
original_type = type(box)
is_numpy = isinstance(box, np.ndarray)
single_box = isinstance(box, (list, tuple))
if single_box:
assert len(box) == 4 or len(box) == 5, (
"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,"
" where k == 4 or 5"
)
arr = torch.tensor(box)[None, :]
else:
# avoid modifying the input box
if is_numpy:
arr = torch.from_numpy(np.asarray(box)).clone()
else:
arr = box.clone()
assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [
BoxMode.XYXY_REL,
BoxMode.XYWH_REL,
], "Relative mode not yet supported!"
if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:
assert (
arr.shape[-1] == 5
), "The last dimension of input shape must be 5 for XYWHA format"
original_dtype = arr.dtype
arr = arr.double()
w = arr[:, 2]
h = arr[:, 3]
a = arr[:, 4]
c = torch.abs(torch.cos(a * math.pi / 180.0))
s = torch.abs(torch.sin(a * math.pi / 180.0))
# This basically computes the horizontal bounding rectangle of the rotated box
new_w = c * w + s * h
new_h = c * h + s * w
# convert center to top-left corner
arr[:, 0] -= new_w / 2.0
arr[:, 1] -= new_h / 2.0
# bottom-right corner
arr[:, 2] = arr[:, 0] + new_w
arr[:, 3] = arr[:, 1] + new_h
arr = arr[:, :4].to(dtype=original_dtype)
elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:
original_dtype = arr.dtype
arr = arr.double()
arr[:, 0] += arr[:, 2] / 2.0
arr[:, 1] += arr[:, 3] / 2.0
angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)
arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)
else:
if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:
arr[:, 2] += arr[:, 0]
arr[:, 3] += arr[:, 1]
elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:
arr[:, 2] -= arr[:, 0]
arr[:, 3] -= arr[:, 1]
else:
raise NotImplementedError(
"Conversion from BoxMode {} to {} is not supported yet".format(
from_mode, to_mode
)
)
if single_box:
return original_type(arr.flatten().tolist())
if is_numpy:
return arr.numpy()
else:
return arr
class Boxes:
"""
This structure stores a list of boxes as a Nx4 torch.Tensor.
It supports some common methods about boxes
(`area`, `clip`, `nonempty`, etc),
and also behaves like a Tensor
(support indexing, `to(device)`, `.device`, and iteration over all boxes)
Attributes:
tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
# Use reshape, so we don't end up creating a new tensor that does not depend on
#<fim_suffix><fim_middle> the inputs (and consequently confuses jit)
|
the inputs (and consequently confuses jit)
|
LINE_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
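A small sketch of BoxMode.convert from the snippet above, converting one absolute XYWH box to XYXY (the values are made up).

from detectron2.structures import BoxMode

box_xywh = [10.0, 20.0, 30.0, 40.0]   # x0, y0, width, height
box_xyxy = BoxMode.convert(box_xywh, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
print(box_xyxy)                       # [10.0, 20.0, 40.0, 60.0]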
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Uniform both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
#<fim_suffix><fim_middle> RPN hidden representation conv
|
RPN hidden representation conv
|
LINE_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
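A toy illustration of the kind of regex-based renaming convert_basic_c2_names performs above; the input keys are invented for the example and only a few of the substitutions are shown.

import re

keys = ["res2_0_branch2a_bn_s", "conv1_w"]
keys = [k.replace("_", ".") for k in keys]                  # underscores become dots
keys = [re.sub(r"\.w$", ".weight", k) for k in keys]        # trailing .w -> .weight
keys = [re.sub(r"bn\.s$", "norm.weight", k) for k in keys]  # bn scale -> norm.weight
print(keys)   # ['res2.0.branch2a.norm.weight', 'conv1.weight']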
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>import math
import numpy as np
import cv2
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(np.bool)
segmentation = segmentation.astype(np.bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(segmentation)
# Intersection between all sets
inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1))
j = inters / union
if j.ndim == 0:
j = 1 if np.isclose(union, 0) else j
else:
j[np.isclose(union, 0)] = 1
return j
def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008):
assert annotation.shape == segmentation.shape
if void_pixels is not None:
assert annotation.shape == void_pixels.shape
if annotation.ndim == 3:
n_frames = annotation.shape[0]
f_res = np.zeros(n_frames)
for frame_id in range(n_frames):
void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ]
f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th)
elif annotation.ndim == 2:
f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th)
else:
raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions')
return f_res
def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008):
"""
Compute mean, recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
void_pixels (ndarray): optional mask with void pixels
Returns:
F (float): boundaries F-measure
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
if void_pixels is not None:
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(foreground_mask).astype(np.bool)
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
#<fim_suffix><fim_middle> Get the pixel boundaries of both masks
|
Get the pixel boundaries of both masks
|
LINE_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Unify both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
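# For illustration only, a couple of hypothetical C2 keys traced through the substitutions above:
# "res2_0_branch2a_w"    -> "res2.0.conv1.weight"
# "res2_0_branch2a_bn_s" -> "res2.0.conv1.norm.weight"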
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
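# For illustration only, using the hypothetical keys from the fpn_map docstring above:
# "fpn.inner.res2.2.sum.lateral.weight" -> "fpn_lateral2.weight"
# "fpn.res2.2.sum.weight"               -> "fpn_output2.weight"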
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note the current matching is not symmetric.
# It assumes model_state_dict will have longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
Match names between the two state dicts, and return a new ckpt_state_dict with names
converted to match model_state_dict with heuristics. The returned dict can be later
loaded with fvcore checkpointer.
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
model and will be renamed at first.
Strategy: suppose that the models that we will create will have prefixes appended
to each of their keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, for each model weight, we look among all loaded keys for one
that is a suffix of the current weight name, and use it if that's the case.
If multiple matches exist, take the one with the longest matching name. For example,
for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
# get a matrix of string matches, where each (i, j) entry corresponds to the size of the
#<fim_suffix><fim_middle> ckpt_key string, if it matches
|
ckpt_key string, if it matches
|
LINE_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/tracking/iou_weighted_hungarian_bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
from typing import List
import numpy as np
from .base_tracker import TRACKER_HEADS_REGISTRY
from .vanilla_hungarian_bbox_iou_tracker import VanillaHungarianBBoxIOUTracker
from detectron2.config import configurable, CfgNode as CfgNode_
@TRACKER_HEADS_REGISTRY.register()
class IOUWeightedHungarianBBoxIOUTracker(VanillaHungarianBBoxIOUTracker):
"""
A tracker using IoU as weight in Hungarian algorithm, also known
as Munkres or Kuhn-Munkres algorithm
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
if this number is exceeded, the id is considered lost
forever
min_box_rel_dim: a relative dimension (percentage); a bbox smaller than this
is removed from tracking
min_instance_period: an instance will only be shown after this number of
periods since it first shows up in the video
track_iou_threshold: IoU threshold; a bbox pair below this value is removed
from tracking
"""
super().__init__(
video_height=video_height,
video_width=video_width,
max_num_instances=max_num_instances,
max_lost_frame_count=max_lost_frame_count,
min_box_rel_dim=min_box_rel_dim,
min_instance_period=min_instance_period,
track_iou_threshold=track_iou_threshold
)
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.iou_weighted_hungarian_bbox_iou_tracker.IOUWeightedHungarianBBoxIOUTracker", # noqa
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def assign_cost_matrix_values(self, cost_matrix: np.ndarray, bbox_pairs: List) -> np.ndarray:
"""
Based on the IoU of each bbox pair, assign the associated value in the cost matrix
Args:
cost_matrix: np.ndarray, initialized 2D array with target dimensions
bbox_pairs: list of bbox pairs; each pair stores its IoU value
Return:
np.ndarray, cost_matrix with assigned values
"""
for pair in bbox_pairs:
#<fim_suffix><fim_middle> assign (-1 * IoU) for above threshold pairs, algorithms will minimize cost
|
assign (-1 * IoU) for above threshold pairs, algorithms will minimize cost
|
LINE_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>import math
import numpy as np
import cv2
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(bool)
segmentation = segmentation.astype(bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(bool)
else:
void_pixels = np.zeros_like(segmentation)
#<fim_suffix><fim_middle> Intersection between all sets
|
Intersection between all sets
|
LINE_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Unify both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
#<fim_suffix><fim_middle> remove the meaningless prediction weight for background class
|
remove the meaningless prediction weight for background class
|
LINE_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
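# A minimal usage sketch (hypothetical dataclass, not part of this file):
# @dataclasses.dataclass
# class Point:
#     x: int = 0
#     y: int = 1
# dump_dataclass(Point())  # -> {"_target_": "<module>.Point", "x": 0, "y": 1}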
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
assert callable(cls), f"_target_ {cls} does not define a callable object"
try:
return cls(**cfg)
except TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise
return cfg #<fim_suffix><fim_middle> return as-is if don't know what to do
|
return as-is if don't know what to do
|
LINE_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Unify both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
#<fim_suffix><fim_middle> --------------------------------------------------------------------------
|
--------------------------------------------------------------------------
|
LINE_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/structures/masks.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
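# A small sanity check with hypothetical coordinates: the unit square has area 1.
# polygon_area(np.array([0, 1, 1, 0]), np.array([0, 0, 1, 1]))  # -> 0.5 * |0 - 2| = 1.0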
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
return np.zeros((height, width)).astype(bool)
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
return mask_util.decode(rle).astype(bool)
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
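# A minimal usage sketch (hypothetical values; relies on pycocotools via polygons_to_bitmask):
# poly = [np.array([0., 0., 10., 0., 10., 10., 0., 10.])]  # one square instance
# box = np.array([0., 0., 10., 10.])
# rasterize_polygons_within_box(poly, box, mask_size=28)   # -> (28, 28) BoolTensor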
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
The following usages are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to PyTorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
if len(masks):
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
else:
return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
However, we observe no difference in accuracy,
though BitMasks requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
output = output >= 0.5
return output
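# A minimal usage sketch (hypothetical shapes): with N masks stored in this object and an
# Nx4 `boxes` tensor on the same device, crop_and_resize(boxes, 28) returns an (N, 28, 28)
# bool tensor, one rasterized training target per box.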
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, its bounding box will be all zero.
"""
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
return Boxes(boxes)
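# For illustration (hypothetical single-pixel mask): a mask that is True only at
# (y=1, x=2) yields the (x0, y0, x1, y1) box [2., 1., 3., 2.].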
@staticmethod
def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
"""
Concatenates a list of BitMasks into a single BitMasks
Arguments:
bitmasks_list (list[BitMasks])
Returns:
BitMasks: the concatenated BitMasks
"""
assert isinstance(bitmasks_list, (list, tuple))
assert len(bitmasks_list) > 0
assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
return cat_bitmasks
class PolygonMasks:
"""
This class stores the segmentation masks for all objects in one image, in the form of polygons.
Attributes:
polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
"""
def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
"""
Arguments:
polygons (list[list[np.ndarray]]): The first
level of the list corresponds to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
"""
if not isinstance(polygons, list):
raise ValueError(
"Cannot create PolygonMasks: Expect a list of list of polygons per image. "
"Got '{}' instead.".format(type(polygons))
)
def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
# Use float64 for higher precision, because why not?
# Always put polygons on CPU (self.to is a no-op) since they
# are supposed to be small tensors.
# May need to change this assumption if GPU placement becomes useful
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64")
def process_polygons(
polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
) -> List[np.ndarray]:
if not isinstance(polygons_per_instance, list):
raise ValueError(
"Cannot create polygons: Expect a list of polygons per instance. "
"Got '{}' instead.".format(type(polygons_per_instance))
)
# transform each polygon to a numpy array
polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
for polygon in polygons_per_instance:
if len(polygon) % 2 != 0 or len(polygon) < 6:
raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
return polygons_per_instance
self.polygons: List[List[np.ndarray]] = [
process_polygons(polygons_per_instance) for polygons_per_instance in polygons
]
def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
return self
@property
def device(self) -> torch.device:
return torch.device("cpu")
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around polygon masks.
"""
boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
for<fim_suffix><fim_middle> idx, polygons_per_instance in enumerate(self.polygons):
minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
maxxy = torch.zeros(2, dtype=torch.float32)
for polygon in polygons_per_instance:
coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
minxy = torch.min(minxy, torch.min(coords, dim=0).values)
maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
boxes[idx, :2] = minxy
boxes[idx, 2:] = maxxy
|
idx, polygons_per_instance in enumerate(self.polygons):
minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
maxxy = torch.zeros(2, dtype=torch.float32)
for polygon in polygons_per_instance:
coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
minxy = torch.min(minxy, torch.min(coords, dim=0).values)
maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
boxes[idx, :2] = minxy
boxes[idx, 2:] = maxxy
|
FOR
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Unify both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note the current matching is not symmetric.
# It assumes model_state_dict will have longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
Match names between the two state dicts, and return a new ckpt_state_dict with names
converted to match model_state_dict with heuristics. The returned dict can be later
loaded with fvcore checkpointer.
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
model and will be renamed at first.
Strategy: suppose that the models that we will create will have prefixes appended
to each of their keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, for each model weight, we look among all loaded keys for one
that is a suffix of the current weight name, and use it if that's the case.
If multiple matches exist, take the one with the longest matching name. For example,
for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
# get a matrix of string matches, where each (i, j) entry corresponds to the size of the
# ckpt_key string, if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
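# For illustration (hypothetical keys): with model_keys = ["backbone.res2.conv1.weight"] and
# ckpt_keys = ["conv1.weight", "res2.conv1.weight"], both entries match, and the longer
# ckpt key ("res2.conv1.weight") is picked by the max() below.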
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
logger = logging.getLogger(__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
result_state_dict = {}
for<fim_suffix><fim_middle> idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
|
idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
|
FOR
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
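As an illustration of the longest-suffix heuristic documented above, here is a minimal hedged sketch with made-up key names (none of them taken from a real checkpoint):
# Hedged sketch: the same "longest '.'-delimited suffix wins" rule, on toy keys.
def _suffix_match(a, b):
    # a model key matches a checkpoint key only as a complete '.'-suffix
    return a == b or a.endswith("." + b)
model_keys = ["backbone.body.res2.conv1.weight", "backbone.body.conv1.weight"]
ckpt_keys = ["res2.conv1.weight", "conv1.weight"]
for mk in model_keys:
    candidates = [ck for ck in ckpt_keys if _suffix_match(mk, ck)]
    print(mk, "->", max(candidates, key=len) if candidates else None)
# backbone.body.res2.conv1.weight -> res2.conv1.weight
# backbone.body.conv1.weight -> conv1.weight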
<filename>UniRef/detectron2/structures/masks.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
return np.zeros((height, width)).astype(np.bool)
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
return mask_util.decode(rle).astype(np.bool)
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
        The following usages are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
if len(masks):
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
else:
return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
        However, we observe no difference in accuracy;
        BitMasks simply requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
output = output >= 0.5
return output
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around bitmasks.
            If a mask is empty, its bounding box will be all zero.
"""
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
for<fim_suffix><fim_middle> idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
|
idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
|
FOR
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
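The get_bounding_boxes completion above derives a tight box from per-row and per-column "any" reductions; the following standalone sketch applies the same idea to a single hand-made 4x4 mask (example data only):
# Hedged sketch of the tight-bbox idea from get_bounding_boxes, on one 2D mask.
import torch
mask = torch.zeros(4, 4, dtype=torch.bool)
mask[1:3, 2:4] = True                        # a 2x2 blob at rows 1-2, cols 2-3
x = torch.where(torch.any(mask, dim=0))[0]   # columns containing foreground
y = torch.where(torch.any(mask, dim=1))[0]   # rows containing foreground
box = [x[0].item(), y[0].item(), x[-1].item() + 1, y[-1].item() + 1]
print(box)                                   # [2, 1, 4, 3] as (x0, y0, x1, y1)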
<filename>UniRef/detectron2/tracking/utils.py<fim_prefix>#!/usr/bin/env python3
from detectron2.structures import Instances
import numpy as np
from typing import List
def create_prediction_pairs(
instances: Instances,
prev_instances: Instances,
iou_all: np.ndarray,
threshold: float = 0.5,
) -> List:
"""
Args:
instances: predictions from current frame
prev_instances: predictions from previous frame
iou_all: 2D numpy array containing iou for each bbox pair
        threshold: below this threshold, a bbox pair is not considered valid
Return:
List of bbox pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for<fim_suffix><fim_middle> j in range(len(prev_instances)):
if iou_all[i, j] < threshold:
continue
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": prev_instances.ID_period[j],
}
)
|
j in range(len(prev_instances)):
if iou_all[i, j] < threshold:
continue
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": prev_instances.ID_period[j],
}
)
|
FOR
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
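The pairing loop in create_prediction_pairs above only needs an IoU matrix, the previous IDs and a threshold; here is a small numpy sketch with an assumed 2x2 IoU matrix and plain lists standing in for detectron2 Instances:
# Hedged sketch of the thresholded pairing logic, with made-up values.
import numpy as np
iou_all = np.array([[0.7, 0.1],
                    [0.2, 0.6]])
prev_ids = [10, 11]                    # hypothetical IDs from the previous frame
threshold = 0.5
pairs = []
for i in range(iou_all.shape[0]):
    for j in range(iou_all.shape[1]):
        if iou_all[i, j] < threshold:
            continue
        pairs.append({"idx": i, "prev_idx": j, "prev_id": prev_ids[j], "IoU": iou_all[i, j]})
print(pairs)                           # keeps only the (0, 0) and (1, 1) pairs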
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
            video_height: height of the video frame
            video_width: width of the video frame
            max_num_instances: maximum number of IDs allowed to be tracked
            max_lost_frame_count: maximum number of frames an ID can lose tracking;
                                  beyond this number, an ID is considered lost
                                  forever
            min_box_rel_dim: a percentage; a bbox smaller than this dimension is
                             removed from tracking
            min_instance_period: an instance will be shown only after this number of
                                 periods since it first appears in the video
            track_iou_threshold: IoU threshold; below this number a bbox pair is removed
                                 from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
        pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
            A list of bbox pair records (indices, previous ID, IoU, previous period)
"""
bbox_pairs = []
for i in range(len(instances)):
for<fim_suffix><fim_middle> j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
|
j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
|
FOR
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
            video_height: height of the video frame
            video_width: width of the video frame
            max_num_instances: maximum number of IDs allowed to be tracked
            max_lost_frame_count: maximum number of frames an ID can lose tracking;
                                  beyond this number, an ID is considered lost
                                  forever
            min_box_rel_dim: a percentage; a bbox smaller than this dimension is
                             removed from tracking
            min_instance_period: an instance will be shown only after this number of
                                 periods since it first appears in the video
            track_iou_threshold: IoU threshold; below this number a bbox pair is removed
                                 from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for<fim_suffix><fim_middle> bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
|
bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
|
FOR
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
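The update() loop above carries each previous ID over to at most one current box and skips pairs below the IoU threshold; the stripped-down sketch below shows that bookkeeping with plain dicts and hypothetical values instead of real Instances:
# Hedged sketch of the greedy ID carry-over, not using real Instances.
bbox_pairs = [
    {"idx": 0, "prev_id": 7, "IoU": 0.9, "prev_period": 3},
    {"idx": 0, "prev_id": 8, "IoU": 0.6, "prev_period": 1},   # idx 0 already matched
    {"idx": 1, "prev_id": 8, "IoU": 0.4, "prev_period": 1},   # below threshold
]
track_iou_threshold = 0.5
ids, periods = {}, {}
matched_idx, matched_ids = set(), set()
for pair in bbox_pairs:
    if pair["idx"] in matched_idx or pair["prev_id"] in matched_ids \
            or pair["IoU"] < track_iou_threshold:
        continue
    ids[pair["idx"]] = pair["prev_id"]
    periods[pair["idx"]] = pair["prev_period"] + 1
    matched_idx.add(pair["idx"])
    matched_ids.add(pair["prev_id"])
print(ids, periods)                    # {0: 7} {0: 4}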
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
import numpy as np
import torch
from detectron2.structures import Boxes, Instances
from .base_tracker import BaseTracker
from scipy.optimize import linear_sum_assignment
from ..config.config import CfgNode as CfgNode_
from typing import Dict
from detectron2.config import configurable
class BaseHungarianTracker(BaseTracker):
"""
A base class for all Hungarian trackers
"""
@configurable
def __init__(
self,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
**kwargs
):
"""
Args:
            video_height: height of the video frame
            video_width: width of the video frame
            max_num_instances: maximum number of IDs allowed to be tracked
            max_lost_frame_count: maximum number of frames an ID can lose tracking;
                                  beyond this number, an ID is considered lost
                                  forever
            min_box_rel_dim: a percentage; a bbox smaller than this dimension is
                             removed from tracking
            min_instance_period: an instance will be shown only after this number of
                                 periods since it first appears in the video
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
@classmethod
def from_config(cls, cfg: CfgNode_) -> Dict:
raise NotImplementedError("Calling HungarianTracker::from_config")
def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray:
raise NotImplementedError("Calling HungarianTracker::build_matrix")
def update(self, instances: Instances) -> Instances:
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
self._untracked_prev_idx = set(range(len(self._prev_instances)))
cost_matrix = self.build_cost_matrix(instances, self._prev_instances)
matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix)
instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx)
instances = self._process_unmatched_idx(instances, matched_idx)
instances = self._process_unmatched_prev_idx(instances, matched_prev_idx)
self._prev_instances = copy.deepcopy(instances)
return instances
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _process_matched_idx(
self,
instances: Instances,
matched_idx: np.ndarray,
matched_prev_idx: np.ndarray
) -> Instances:
assert matched_idx.size == matched_prev_idx.size
for<fim_suffix><fim_middle> i in range(matched_idx.size):
instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]]
instances.ID_period[matched_idx[i]] = \
self._prev_instances.ID_period[matched_prev_idx[i]] + 1
instances.lost_frame_count[matched_idx[i]] = 0
|
i in range(matched_idx.size):
instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]]
instances.ID_period[matched_idx[i]] = \
self._prev_instances.ID_period[matched_prev_idx[i]] + 1
instances.lost_frame_count[matched_idx[i]] = 0
|
FOR
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
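BaseHungarianTracker delegates the matching itself to scipy's linear_sum_assignment; the tiny example below, with an assumed 2x2 cost matrix, shows the index arrays that _process_matched_idx then walks through:
# Minimal linear_sum_assignment example feeding _process_matched_idx-style code.
import numpy as np
from scipy.optimize import linear_sum_assignment
cost_matrix = np.array([[0.1, 0.9],
                        [0.8, 0.2]])   # lower cost = better match
matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix)
print(matched_idx, matched_prev_idx)   # [0 1] [0 1]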
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
            video_height: height of the video frame
            video_width: width of the video frame
            max_num_instances: maximum number of IDs allowed to be tracked
            max_lost_frame_count: maximum number of frames an ID can lose tracking;
                                  beyond this number, an ID is considered lost
                                  forever
            min_box_rel_dim: a percentage; a bbox smaller than this dimension is
                             removed from tracking
            min_instance_period: an instance will be shown only after this number of
                                 periods since it first appears in the video
            track_iou_threshold: IoU threshold; below this number a bbox pair is removed
                                 from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
        pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
            A list of bbox pair records (indices, previous ID, IoU, previous period)
"""
bbox_pairs = []
for<fim_suffix><fim_middle> i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
|
i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
|
FOR
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
import numpy as np
import torch
from detectron2.structures import Boxes, Instances
from .base_tracker import BaseTracker
from scipy.optimize import linear_sum_assignment
from ..config.config import CfgNode as CfgNode_
from typing import Dict
from detectron2.config import configurable
class BaseHungarianTracker(BaseTracker):
"""
A base class for all Hungarian trackers
"""
@configurable
def __init__(
self,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
**kwargs
):
"""
Args:
            video_height: height of the video frame
            video_width: width of the video frame
            max_num_instances: maximum number of IDs allowed to be tracked
            max_lost_frame_count: maximum number of frames an ID can lose tracking;
                                  beyond this number, an ID is considered lost
                                  forever
            min_box_rel_dim: a percentage; a bbox smaller than this dimension is
                             removed from tracking
            min_instance_period: an instance will be shown only after this number of
                                 periods since it first appears in the video
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
@classmethod
def from_config(cls, cfg: CfgNode_) -> Dict:
raise NotImplementedError("Calling HungarianTracker::from_config")
def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray:
raise NotImplementedError("Calling HungarianTracker::build_matrix")
def update(self, instances: Instances) -> Instances:
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
self._untracked_prev_idx = set(range(len(self._prev_instances)))
cost_matrix = self.build_cost_matrix(instances, self._prev_instances)
matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix)
instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx)
instances = self._process_unmatched_idx(instances, matched_idx)
instances = self._process_unmatched_prev_idx(instances, matched_prev_idx)
self._prev_instances = copy.deepcopy(instances)
return instances
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _process_matched_idx(
self,
instances: Instances,
matched_idx: np.ndarray,
matched_prev_idx: np.ndarray
) -> Instances:
assert matched_idx.size == matched_prev_idx.size
for i in range(matched_idx.size):
instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]]
instances.ID_period[matched_idx[i]] = \
self._prev_instances.ID_period[matched_prev_idx[i]] + 1
instances.lost_frame_count[matched_idx[i]] = 0
return instances
def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances:
untracked_idx = set(range(len(instances))).difference(set(matched_idx))
for<fim_suffix><fim_middle> idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
|
idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
|
FOR
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>import math
import numpy as np
import cv2
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(np.bool)
segmentation = segmentation.astype(np.bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(segmentation)
# Intersection between all sets
inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1))
j = inters / union
if j.ndim == 0:
j = 1 if np.isclose(union, 0) else j
else:
j[np.isclose(union, 0)] = 1
return j
def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008):
assert annotation.shape == segmentation.shape
if void_pixels is not None:
assert annotation.shape == void_pixels.shape
if annotation.ndim == 3:
n_frames = annotation.shape[0]
f_res = np.zeros(n_frames)
for frame_id in range(n_frames):
void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ]
f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th)
elif annotation.ndim == 2:
f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th)
else:
raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions')
return f_res
def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008):
"""
    Compute mean, recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
void_pixels (ndarray): optional mask with void pixels
Returns:
F (float): boundaries F-measure
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
if void_pixels is not None:
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(foreground_mask).astype(np.bool)
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
# Get the pixel boundaries of both masks
fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels))
gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels))
from skimage.morphology import disk
# fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
return F
def _seg2bmap(seg, width=None, height=None):
"""
From a segmentation, compute a binary boundary map with 1 pixel wide
boundaries. The boundary pixels are offset by 1/2 pixel towards the
origin from the actual segment boundary.
Arguments:
seg : Segments labeled from 1..k.
width : Width of desired bmap <= seg.shape[1]
height : Height of desired bmap <= seg.shape[0]
Returns:
bmap (ndarray): Binary boundary map.
David Martin <[email protected]>
January 2003
"""
seg = seg.astype(np.bool)
seg[seg > 0] = 1
assert np.atleast_3d(seg).shape[2] == 1
width = seg.shape[1] if width is None else width
height = seg.shape[0] if height is None else height
h, w = seg.shape[:2]
ar1 = float(width) / float(height)
ar2 = float(w) / float(h)
assert not (
width > w | height > h | abs(ar1 - ar2) > 0.01
), "Can" "t convert %dx%d seg to %dx%d bmap." % (w, h, width, height)
e = np.zeros_like(seg)
s = np.zeros_like(seg)
se = np.zeros_like(seg)
e[:, :-1] = seg[:, 1:]
s[:-1, :] = seg[1:, :]
se[:-1, :-1] = seg[1:, 1:]
b = seg ^ e | seg ^ s | seg ^ se
b[-1, :] = seg[-1, :] ^ e[-1, :]
b[:, -1] = seg[:, -1] ^ s[:, -1]
b[-1, -1] = 0
if w == width and h == height:
bmap = b
else:
bmap = np.zeros((height, width))
for x in range(w):
for<fim_suffix><fim_middle> y in range(h):
if b[y, x]:
j = 1 + math.floor((y - 1) + height / h)
i = 1 + math.floor((x - 1) + width / h)
bmap[j, i] = 1
|
y in range(h):
if b[y, x]:
j = 1 + math.floor((y - 1) + height / h)
i = 1 + math.floor((x - 1) + width / h)
bmap[j, i] = 1
|
FOR
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
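db_eval_iou above reduces to an intersection-over-union of boolean masks (with void pixels excluded); here is a hand-made numpy check on two assumed 3x3 masks, omitting the void-pixel handling:
# Hedged check of the Jaccard computation, without the void-pixel handling.
import numpy as np
annotation = np.array([[1, 1, 0],
                       [1, 1, 0],
                       [0, 0, 0]], dtype=bool)
segmentation = np.array([[0, 1, 1],
                         [0, 1, 1],
                         [0, 0, 0]], dtype=bool)
inters = np.sum(annotation & segmentation)
union = np.sum(annotation | segmentation)
print(inters / union)                  # 2 / 6 = 0.333...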
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from<fim_suffix><fim_middle> omegaconf import ListConfig
|
omegaconf import ListConfig
|
IMPORT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
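The instantiate() docstring above describes recursive construction from dicts keyed by "_target_"; the toy stand-in below (not detectron2's actual implementation) resolves the dotted path with pydoc.locate and handles only a flat dict:
# Toy, non-recursive stand-in for "_target_" instantiation; illustration only.
import pydoc
def toy_instantiate(cfg):
    cfg = dict(cfg)
    target = pydoc.locate(cfg.pop("_target_"))   # dotted path -> callable
    return target(**cfg)
od = toy_instantiate({"_target_": "collections.OrderedDict", "a": 1, "b": 2})
print(od)                              # an OrderedDict with keys 'a' and 'b'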
<filename>UniRef/detectron2/layers/roi_align.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
from torch import nn
from torchvision.ops import roi_align
# NOTE: torchvision's RoIAlign has a different default aligned=False
class ROIAlign(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio, aligned=True):
"""
Args:
output_size (tuple): h, w
spatial_scale (float): scale the input boxes by this number
sampling_ratio (int): number of inputs samples to take for each output
sample. 0 to take samples densely.
aligned (bool): if False, use the legacy implementation in
Detectron. If True, align the results more perfectly.
Note:
The meaning of aligned=True:
Given a continuous coordinate c, its two neighboring pixel indices (in our
pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example,
c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled
from the underlying signal at continuous coordinates 0.5 and 1.5). But the original
roi_align (aligned=False) does not subtract the 0.5 when computing neighboring
pixel indices and therefore it uses pixels with a slightly incorrect alignment
(relative to our pixel model) when performing bilinear interpolation.
With `aligned=True`,
we first appropriately scale the ROI and then shift it by -0.5
prior to calling roi_align. This produces the correct neighbors; see
detectron2/tests/test_roi_align.py for verification.
                The difference does not affect the model's performance when
                ROIAlign is used together with conv layers.
"""
super().__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
self.aligned = aligned
from<fim_suffix><fim_middle> torchvision import __version__
|
torchvision import __version__
|
IMPORT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
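The aligned=True note above maps a continuous coordinate c to neighbor pixel indices floor(c - 0.5) and ceil(c - 0.5); the snippet below simply replays the c = 1.3 example from that docstring:
# Replays the c = 1.3 neighbor-index example from the aligned=True note.
import math
c = 1.3
print(math.floor(c - 0.5), math.ceil(c - 0.5))   # 0 1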
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>import math
import numpy as np
import cv2
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(np.bool)
segmentation = segmentation.astype(np.bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(segmentation)
# Intersection between all sets
inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1))
j = inters / union
if j.ndim == 0:
j = 1 if np.isclose(union, 0) else j
else:
j[np.isclose(union, 0)] = 1
return j
def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008):
assert annotation.shape == segmentation.shape
if void_pixels is not None:
assert annotation.shape == void_pixels.shape
if annotation.ndim == 3:
n_frames = annotation.shape[0]
f_res = np.zeros(n_frames)
for frame_id in range(n_frames):
void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ]
f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th)
elif annotation.ndim == 2:
f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th)
else:
raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions')
return f_res
def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008):
"""
    Compute mean, recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
void_pixels (ndarray): optional mask with void pixels
Returns:
F (float): boundaries F-measure
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
if void_pixels is not None:
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(foreground_mask).astype(np.bool)
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
# Get the pixel boundaries of both masks
fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels))
gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels))
from<fim_suffix><fim_middle> skimage.morphology import disk
|
skimage.morphology import disk
|
IMPORT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/utils/registry.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any
import pydoc
from fvcore.common.registry import Registry # for backward compatibility.
"""
``Registry`` and `locate` provide ways to map a string (typically found
in config files) to callable objects.
"""
__all__ = ["Registry", "locate"]
def _convert_target_to_string(t: Any) -> str:
"""
Inverse of ``locate()``.
Args:
t: any object with ``__module__`` and ``__qualname__``
"""
module, qualname = t.__module__, t.__qualname__
# Compress the path to this object, e.g. ``module.submodule._impl.class``
# may become ``module.submodule.class``, if the later also resolves to the same
# object. This simplifies the string, and also is less affected by moving the
# class implementation.
module_parts = module.split(".")
for k in range(1, len(module_parts)):
prefix = ".".join(module_parts[:k])
candidate = f"{prefix}.{qualname}"
try:
if locate(candidate) is t:
return candidate
except ImportError:
pass
return f"{module}.{qualname}"
def locate(name: str) -> Any:
"""
Locate and return an object ``x`` using an input string ``{x.__module__}.{x.__qualname__}``,
such as "module.submodule.class_name".
Raise Exception if it cannot be found.
"""
obj = pydoc.locate(name)
# Some cases (e.g. torch.optim.sgd.SGD) not handled correctly
# by pydoc.locate. Try a private function from hydra.
if obj is None:
try:
# from hydra.utils import get_method - will print many errors
from<fim_suffix><fim_middle> hydra.utils import _locate
|
hydra.utils import _locate
|
IMPORT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
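locate() above wraps pydoc.locate with a hydra fallback; a plain pydoc.locate call (standard library only, no detectron2 required) shows the dotted-path resolution it builds on:
# Standard-library pydoc.locate, the primary path taken by locate() above.
import pydoc
fn = pydoc.locate("math.sqrt")
print(fn(9.0))                         # 3.0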
<filename>UniRef/detectron2/config/config.py<fim_prefix># -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import inspect
import logging
from fvcore.common.config import CfgNode as _CfgNode
from detectron2.utils.file_io import PathManager
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
.. automethod:: clone
.. automethod:: freeze
.. automethod:: defrost
.. automethod:: is_frozen
.. automethod:: load_yaml_with_base
.. automethod:: merge_from_list
.. automethod:: merge_from_other_cfg
"""
@classmethod
def _open_cfg(cls, filename):
return PathManager.open(filename, "r")
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
"""
Load content from the given config file and merge it into self.
Args:
cfg_filename: config filename
allow_unsafe: allow unsafe yaml syntax
"""
assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
def dump(self, *args, **kwargs):
"""
Returns:
str: a yaml string representation of the config
"""
# to make it show up in docs
return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
from<fim_suffix><fim_middle> .defaults import _C
|
.defaults import _C
|
IMPORT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Uniform both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note that the current matching is not symmetric:
# it assumes model_state_dict will have the longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
Match names between the two state dicts, and return a new ckpt_state_dict with names
converted to match model_state_dict using heuristics. The returned dict can be later
loaded with fvcore checkpointer.
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
model and will be renamed at first.
Strategy: suppose that the models that we will create will have prefixes appended
to each of their keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, for each model weight we look among all loaded keys for one that is a suffix
of the current weight name, and use it if that is the case.
If multiple matches exist, we take the one with the longest matching
name. For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def<fim_suffix><fim_middle> match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
|
match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
|
METHOD
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/structures/masks.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
return np.zeros((height, width)).astype(bool)
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
return mask_util.decode(rle).astype(bool)
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
The following usages are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to PyTorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
if len(masks):
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
else:
return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
However, we observe no difference in accuracy;
note that BitMasks requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
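# ROIAlign interpolates the 0/1 masks into floats; threshold at 0.5 to binarize them again.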
output = output >= 0.5
return output
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, its bounding box will be all zero.
"""
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
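# x_any[i] marks the columns of mask i that contain any foreground pixel; y_any[i] marks the rows.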
for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
return Boxes(boxes)
@staticmethod
def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
"""
Concatenates a list of BitMasks into a single BitMasks
Arguments:
bitmasks_list (list[BitMasks])
Returns:
BitMasks: the concatenated BitMasks
"""
assert isinstance(bitmasks_list, (list, tuple))
assert len(bitmasks_list) > 0
assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
return cat_bitmasks
class PolygonMasks:
"""
This class stores the segmentation masks for all objects in one image, in the form of polygons.
Attributes:
polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
"""
def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
"""
Arguments:
polygons (list[list[np.ndarray]]): The first
level of the list corresponds to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
"""
if not isinstance(polygons, list):
raise ValueError(
"Cannot create PolygonMasks: Expect a list of list of polygons per image. "
"Got '{}' instead.".format(type(polygons))
)
def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
# Use float64 for higher precision, because why not?
# Always put polygons on CPU (self.to is a no-op) since they
# are supposed to be small tensors.
# May need to change this assumption if GPU placement becomes useful
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64")
def<fim_suffix><fim_middle> process_polygons(
polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
) -> List[np.ndarray]:
if not isinstance(polygons_per_instance, list):
raise ValueError(
"Cannot create polygons: Expect a list of polygons per instance. "
"Got '{}' instead.".format(type(polygons_per_instance))
)
# transform each polygon to a numpy array
polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
for polygon in polygons_per_instance:
if len(polygon) % 2 != 0 or len(polygon) < 6:
raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
return polygons_per_instance
|
process_polygons(
polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
) -> List[np.ndarray]:
if not isinstance(polygons_per_instance, list):
raise ValueError(
"Cannot create polygons: Expect a list of polygons per instance. "
"Got '{}' instead.".format(type(polygons_per_instance))
)
# transform each polygon to a numpy array
polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
for polygon in polygons_per_instance:
if len(polygon) % 2 != 0 or len(polygon) < 6:
raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
return polygons_per_instance
|
METHOD
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Unify both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def<fim_suffix><fim_middle> fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
|
fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
|
METHOD
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Unify both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note that the current matching is not symmetric:
# it assumes model_state_dict will have the longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
Match names between the two state dicts, and return a new ckpt_state_dict with names
converted to match model_state_dict using heuristics. The returned dict can be later
loaded with fvcore checkpointer.
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
model and will be renamed at first.
Strategy: suppose that the models that we will create will have prefixes appended
to each of their keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, for each model weight we look among all loaded keys for one that is a suffix
of the current weight name, and use it if that is the case.
If multiple matches exist, we take the one with the longest matching
name. For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
# get a matrix of string matches, where each (i, j) entry corresponds to the size of the
# ckpt_key string, if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
logger = logging.getLogger(__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
result_state_dict = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
# logging:
matched_model_keys = sorted(matched_keys.values())
if len(matched_model_keys) == 0:
logger.warning("No weights in checkpoint matched with model.")
return ckpt_state_dict
common_prefix = _longest_common_prefix(matched_model_keys)
rev_matched_keys = {v: k for k, v in matched_keys.items()}
original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
table = []
memo = set()
for key_model in matched_model_keys:
if key_model in memo:
continue
if key_model in model_key_groups:
group = model_key_groups[key_model]
memo |= set(group)
shapes = [tuple(model_state_dict[k].shape) for k in group]
table.append(
(
_longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
_group_str([original_keys[k] for k in group]),
" ".join([str(x).replace(" ", "") for x in shapes]),
)
)
else:
key_checkpoint = original_keys[key_model]
shape = str(tuple(model_state_dict[key_model].shape))
table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
table_str = tabulate(
table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"]
)
logger.info(
"Following weights matched with "
+ (f"submodule {common_prefix[:-1]}" if common_prefix else "model")
+ ":\n"
+ table_str
)
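# Checkpoint keys that matched nothing are carried over unchanged, presumably so the
# checkpointer can still report them as unused.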
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
for k in unmatched_ckpt_keys:
result_state_dict[k] = ckpt_state_dict[k]
return result_state_dict
def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
"""
Params in the same submodule are grouped together.
Args:
keys: names of all parameters
original_names: mapping from parameter name to their name in the checkpoint
Returns:
dict[name -> all other names in the same group]
"""
def<fim_suffix><fim_middle> _submodule_name(key):
pos = key.rfind(".")
if pos < 0:
return None
prefix = key[: pos + 1]
return prefix
|
_submodule_name(key):
pos = key.rfind(".")
if pos < 0:
return None
prefix = key[: pos + 1]
return prefix
|
METHOD
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/config/config.py<fim_prefix># -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import inspect
import logging
from fvcore.common.config import CfgNode as _CfgNode
from detectron2.utils.file_io import PathManager
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
.. automethod:: clone
.. automethod:: freeze
.. automethod:: defrost
.. automethod:: is_frozen
.. automethod:: load_yaml_with_base
.. automethod:: merge_from_list
.. automethod:: merge_from_other_cfg
"""
@classmethod
def _open_cfg(cls, filename):
return PathManager.open(filename, "r")
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
"""
Load content from the given config file and merge it into self.
Args:
cfg_filename: config filename
allow_unsafe: allow unsafe yaml syntax
"""
assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
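# Configs written before versioning carry no VERSION key; guess_version heuristically
# infers one from the keys they contain.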
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
def dump(self, *args, **kwargs):
"""
Returns:
str: a yaml string representation of the config
"""
# to make it show up in docs
return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
from .defaults import _C
return _C.clone()
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY"; after calling
`set_global_cfg(cfg)`, the key can be accessed by:
::
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
def configurable(init_func=None, *, from_config=None):
"""
Decorate a function or a class's __init__ method so that it can be called
with a :class:`CfgNode` object using a :func:`from_config` function that translates
:class:`CfgNode` to arguments.
Examples:
::
# Usage 1: Decorator on __init__:
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg): # 'cfg' must be the first argument
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
# Usage 2: Decorator on any function. Needs an extra from_config argument:
@configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
def a_func(a, b=2, c=3):
pass
a1 = a_func(a=1, b=2) # regular call
a2 = a_func(cfg) # call with a cfg
a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
Args:
init_func (callable): a class's ``__init__`` method in usage 1. The
class must have a ``from_config`` classmethod which takes `cfg` as
the first argument.
from_config (callable): the from_config function in usage 2. It must take `cfg`
as its first argument.
"""
if init_func is not None:
assert (
inspect.isfunction(init_func)
and from_config is None
and init_func.__name__ == "__init__"
), "Incorrect use of @configurable. Check API documentation for examples."
@functools.wraps(init_func)
def<fim_suffix><fim_middle> wrapped(self, *args, **kwargs):
try:
from_config_func = type(self).from_config
except AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs)
|
wrapped(self, *args, **kwargs):
try:
from_config_func = type(self).from_config
except AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs)
|
METHOD
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/structures/masks.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
return np.zeros((height, width)).astype(bool)
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
return mask_util.decode(rle).astype(bool)
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
The following usages are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to PyTorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
if len(masks):
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
else:
return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
However, we observe no difference in accuracy;
note that BitMasks requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
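# ROIAlign expects each roi as (batch_index, x1, y1, x2, y2), hence the prepended per-mask batch index.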
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
output = output >= 0.5
return output
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, its bounding box will be all zero.
"""
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
return Boxes(boxes)
@staticmethod
def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
"""
Concatenates a list of BitMasks into a single BitMasks
Arguments:
bitmasks_list (list[BitMasks])
Returns:
BitMasks: the concatenated BitMasks
"""
assert isinstance(bitmasks_list, (list, tuple))
assert len(bitmasks_list) > 0
assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
return cat_bitmasks
class PolygonMasks:
"""
This class stores the segmentation masks for all objects in one image, in the form of polygons.
Attributes:
polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
"""
def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
"""
Arguments:
polygons (list[list[np.ndarray]]): The first
level of the list corresponds to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
"""
if not isinstance(polygons, list):
raise ValueError(
"Cannot create PolygonMasks: Expect a list of list of polygons per image. "
"Got '{}' instead.".format(type(polygons))
)
def<fim_suffix><fim_middle> _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
# Use float64 for higher precision, because why not?
# Always put polygons on CPU (self.to is a no-op) since they
# are supposed to be small tensors.
# May need to change this assumption if GPU placement becomes useful
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64")
|
_make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
# Use float64 for higher precision, because why not?
# Always put polygons on CPU (self.to is a no-op) since they
# are supposed to be small tensors.
# May need to change this assumption if GPU placement becomes useful
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64")
|
METHOD
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/config/config.py<fim_prefix># -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import inspect
import logging
from fvcore.common.config import CfgNode as _CfgNode
from detectron2.utils.file_io import PathManager
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
.. automethod:: clone
.. automethod:: freeze
.. automethod:: defrost
.. automethod:: is_frozen
.. automethod:: load_yaml_with_base
.. automethod:: merge_from_list
.. automethod:: merge_from_other_cfg
"""
@classmethod
def _open_cfg(cls, filename):
return PathManager.open(filename, "r")
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
"""
Load content from the given config file and merge it into self.
Args:
cfg_filename: config filename
allow_unsafe: allow unsafe yaml syntax
"""
assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
def dump(self, *args, **kwargs):
"""
Returns:
str: a yaml string representation of the config
"""
# to make it show up in docs
return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
from .defaults import _C
return _C.clone()
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY"; after calling
`set_global_cfg(cfg)`, the key can be accessed by:
::
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
def configurable(init_func=None, *, from_config=None):
"""
Decorate a function or a class's __init__ method so that it can be called
with a :class:`CfgNode` object using a :func:`from_config` function that translates
:class:`CfgNode` to arguments.
Examples:
::
# Usage 1: Decorator on __init__:
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg): # 'cfg' must be the first argument
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
# Usage 2: Decorator on any function. Needs an extra from_config argument:
@configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
def a_func(a, b=2, c=3):
pass
a1 = a_func(a=1, b=2) # regular call
a2 = a_func(cfg) # call with a cfg
a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
Args:
init_func (callable): a class's ``__init__`` method in usage 1. The
class must have a ``from_config`` classmethod which takes `cfg` as
the first argument.
from_config (callable): the from_config function in usage 2. It must take `cfg`
as its first argument.
"""
if init_func is not None:
assert (
inspect.isfunction(init_func)
and from_config is None
and init_func.__name__ == "__init__"
), "Incorrect use of @configurable. Check API documentation for examples."
@functools.wraps(init_func)
def wrapped(self, *args, **kwargs):
try:
from_config_func = type(self).from_config
except AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs)
return wrapped
else:
if from_config is None:
return configurable # @configurable() is made equivalent to @configurable
assert inspect.isfunction(
from_config
), "from_config argument of configurable must be a function!"
def wrapper(orig_func):
@functools.wraps(orig_func)
def<fim_suffix><fim_middle> wrapped(*args, **kwargs):
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config, *args, **kwargs)
return orig_func(**explicit_args)
else:
return orig_func(*args, **kwargs)
|
wrapped(*args, **kwargs):
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config, *args, **kwargs)
return orig_func(**explicit_args)
else:
return orig_func(*args, **kwargs)
|
METHOD
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/utils/registry.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any
import pydoc
from fvcore.common.registry import Registry # for backward compatibility.
"""
``Registry`` and `locate` provide ways to map a string (typically found
in config files) to callable objects.
"""
__all__ = ["Registry", "locate"]
def _convert_target_to_string(t: Any) -> str:
"""
Inverse of ``locate()``.
Args:
t: any object with ``__module__`` and ``__qualname__``
"""
module, qualname = t.__module__, t.__qualname__
# Compress the path to this object, e.g. ``module.submodule._impl.class``
# may become ``module.submodule.class``, if the latter also resolves to the same
# object. This simplifies the string, and also is less affected by moving the
# class implementation.
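# e.g. a class defined in 'detectron2.modeling.backbone.resnet' may be referred to via the
# shorter 'detectron2.modeling' prefix if that package re-exports it.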
module_parts = module.split(".")
for k in range(1, len(module_parts)):
prefix = ".".join(module_parts[:k])
candidate = f"{prefix}.{qualname}"
try:
if locate(candidate) is t:
return candidate
except ImportError:
pass
return f"{module}.{qualname}"
def locate(name: str) -> Any:
"""
Locate and return an object ``x`` using an input string ``{x.__module__}.{x.__qualname__}``,
such as "module.submodule.class_name".
Raise Exception if it cannot be found.
"""
obj = pydoc.locate(name)
# Some cases (e.g. torch.optim.sgd.SGD) are not handled correctly
# by pydoc.locate. Try a private function from hydra.
if obj is None:
try:<fim_suffix><fim_middle>
# from hydra.utils import get_method - will print many errors
from hydra.utils import _locate
|
# from hydra.utils import get_method - will print many errors
from hydra.utils import _locate
|
TRY
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
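A usage sketch for the two registry helpers above; `torch.optim.SGD` is only an illustrative target and the exact compressed string can vary across library versions:

from detectron2.utils.registry import _convert_target_to_string, locate
import torch

# The compressed name drops redundant private submodules where possible,
# e.g. "torch.optim.sgd.SGD" -> "torch.optim.SGD".
name = _convert_target_to_string(torch.optim.SGD)
assert locate(name) is torch.optim.SGD  # the round trip resolves to the same object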
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
assert callable(cls), f"_target_ {cls} does not define a callable object"
try:<fim_suffix><fim_middle>
return cls(**cfg)
|
return cls(**cfg)
|
TRY
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
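A minimal usage sketch for `instantiate`; `datetime.date` is an arbitrary standard-library callable chosen only for illustration:

from detectron2.config.instantiate import instantiate

# "_target_" names the callable (as a dotted string or the object itself);
# the remaining keys are passed as keyword arguments.
cfg = {"_target_": "datetime.date", "year": 2024, "month": 1, "day": 1}
d = instantiate(cfg)
assert d.year == 2024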
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:<fim_suffix><fim_middle>
cls_name = cls.__module__ + "." + cls.__qualname__
|
cls_name = cls.__module__ + "." + cls.__qualname__
|
TRY
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
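A sketch of what `dump_dataclass` produces; `BoxSpec` is a made-up dataclass used only for this example:

import dataclasses
from detectron2.config.instantiate import dump_dataclass

@dataclasses.dataclass
class BoxSpec:  # hypothetical dataclass, not part of detectron2
    width: int
    height: int

dumped = dump_dataclass(BoxSpec(width=4, height=3))
# dumped is roughly {"_target_": "<module>.BoxSpec", "width": 4, "height": 3};
# the exact "_target_" string depends on where BoxSpec is importable from.
print(dumped)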
<filename>UniRef/detectron2/config/config.py<fim_prefix># -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import inspect
import logging
from fvcore.common.config import CfgNode as _CfgNode
from detectron2.utils.file_io import PathManager
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
.. automethod:: clone
.. automethod:: freeze
.. automethod:: defrost
.. automethod:: is_frozen
.. automethod:: load_yaml_with_base
.. automethod:: merge_from_list
.. automethod:: merge_from_other_cfg
"""
@classmethod
def _open_cfg(cls, filename):
return PathManager.open(filename, "r")
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
"""
Load content from the given config file and merge it into self.
Args:
cfg_filename: config filename
allow_unsafe: allow unsafe yaml syntax
"""
assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
def dump(self, *args, **kwargs):
"""
Returns:
str: a yaml string representation of the config
"""
# to make it show up in docs
return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
from .defaults import _C
return _C.clone()
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY", after calling
`set_global_cfg(cfg)`, the key can be accessed by:
::
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
def configurable(init_func=None, *, from_config=None):
"""
Decorate a function or a class's __init__ method so that it can be called
with a :class:`CfgNode` object using a :func:`from_config` function that translates
:class:`CfgNode` to arguments.
Examples:
::
# Usage 1: Decorator on __init__:
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg): # 'cfg' must be the first argument
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
# Usage 2: Decorator on any function. Needs an extra from_config argument:
        @configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
def a_func(a, b=2, c=3):
pass
a1 = a_func(a=1, b=2) # regular call
a2 = a_func(cfg) # call with a cfg
a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
Args:
init_func (callable): a class's ``__init__`` method in usage 1. The
class must have a ``from_config`` classmethod which takes `cfg` as
the first argument.
from_config (callable): the from_config function in usage 2. It must take `cfg`
as its first argument.
"""
if init_func is not None:
assert (
inspect.isfunction(init_func)
and from_config is None
and init_func.__name__ == "__init__"
), "Incorrect use of @configurable. Check API documentation for examples."
@functools.wraps(init_func)
def wrapped(self, *args, **kwargs):
try:<fim_suffix><fim_middle>
from_config_func = type(self).from_config
|
from_config_func = type(self).from_config
|
TRY
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
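A short usage sketch for the config API shown in the row above; "my_config.yaml" and the override key are placeholders:

from detectron2.config import get_cfg, set_global_cfg

cfg = get_cfg()                               # a clone of the default config
cfg.merge_from_file("my_config.yaml")         # placeholder path; unsafe YAML loading is on by default
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])  # CLI-style override, inherited from fvcore's CfgNode
cfg.freeze()
set_global_cfg(cfg)                           # optional: expose it as detectron2.config.global_cfg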
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except<fim_suffix><fim_middle> Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
|
Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
|
CATCH
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/utils/registry.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any
import pydoc
from fvcore.common.registry import Registry # for backward compatibility.
"""
``Registry`` and `locate` provide ways to map a string (typically found
in config files) to callable objects.
"""
__all__ = ["Registry", "locate"]
def _convert_target_to_string(t: Any) -> str:
"""
Inverse of ``locate()``.
Args:
t: any object with ``__module__`` and ``__qualname__``
"""
module, qualname = t.__module__, t.__qualname__
# Compress the path to this object, e.g. ``module.submodule._impl.class``
    # may become ``module.submodule.class``, if the latter also resolves to the same
# object. This simplifies the string, and also is less affected by moving the
# class implementation.
module_parts = module.split(".")
for k in range(1, len(module_parts)):
prefix = ".".join(module_parts[:k])
candidate = f"{prefix}.{qualname}"
try:
if locate(candidate) is t:
return candidate
except ImportError:
pass
return f"{module}.{qualname}"
def locate(name: str) -> Any:
"""
Locate and return an object ``x`` using an input string ``{x.__module__}.{x.__qualname__}``,
such as "module.submodule.class_name".
Raise Exception if it cannot be found.
"""
obj = pydoc.locate(name)
# Some cases (e.g. torch.optim.sgd.SGD) not handled correctly
# by pydoc.locate. Try a private function from hydra.
if obj is None:
try:
# from hydra.utils import get_method - will print many errors
from hydra.utils import _locate
except<fim_suffix><fim_middle> ImportError as e:
raise ImportError(f"Cannot dynamically locate object {name}!") from e
|
ImportError as e:
raise ImportError(f"Cannot dynamically locate object {name}!") from e
|
CATCH
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/config/config.py<fim_prefix># -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import inspect
import logging
from fvcore.common.config import CfgNode as _CfgNode
from detectron2.utils.file_io import PathManager
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
.. automethod:: clone
.. automethod:: freeze
.. automethod:: defrost
.. automethod:: is_frozen
.. automethod:: load_yaml_with_base
.. automethod:: merge_from_list
.. automethod:: merge_from_other_cfg
"""
@classmethod
def _open_cfg(cls, filename):
return PathManager.open(filename, "r")
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
"""
Load content from the given config file and merge it into self.
Args:
cfg_filename: config filename
allow_unsafe: allow unsafe yaml syntax
"""
assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
def dump(self, *args, **kwargs):
"""
Returns:
str: a yaml string representation of the config
"""
# to make it show up in docs
return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
from .defaults import _C
return _C.clone()
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY", after calling
`set_global_cfg(cfg)`, the key can be accessed by:
::
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
def configurable(init_func=None, *, from_config=None):
"""
Decorate a function or a class's __init__ method so that it can be called
with a :class:`CfgNode` object using a :func:`from_config` function that translates
:class:`CfgNode` to arguments.
Examples:
::
# Usage 1: Decorator on __init__:
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg): # 'cfg' must be the first argument
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
# Usage 2: Decorator on any function. Needs an extra from_config argument:
        @configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
def a_func(a, b=2, c=3):
pass
a1 = a_func(a=1, b=2) # regular call
a2 = a_func(cfg) # call with a cfg
a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
Args:
init_func (callable): a class's ``__init__`` method in usage 1. The
class must have a ``from_config`` classmethod which takes `cfg` as
the first argument.
from_config (callable): the from_config function in usage 2. It must take `cfg`
as its first argument.
"""
if init_func is not None:
assert (
inspect.isfunction(init_func)
and from_config is None
and init_func.__name__ == "__init__"
), "Incorrect use of @configurable. Check API documentation for examples."
@functools.wraps(init_func)
def wrapped(self, *args, **kwargs):
try:
from_config_func = type(self).from_config
except<fim_suffix><fim_middle> AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e
|
AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e
|
CATCH
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
assert callable(cls), f"_target_ {cls} does not define a callable object"
try:
return cls(**cfg)
except<fim_suffix><fim_middle> TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise
|
TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise
|
CATCH
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
<filename>camp_zipnerf/internal/spin_math.py<fim_prefix># coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pyformat: mode=yapf
"""Math utility functions."""
from typing import Optional, Union
from internal import math
import jax
from jax import numpy as jnp
import optax
def matmul(a, b):
"""jnp.matmul defaults to bfloat16 on TPU, but this doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def safe_sqrt(x,
*,
eps = jnp.finfo(jnp.float32).eps,
value_at_zero = 0.0):
"""A safe version of jnp.sqrt that avoid evaluating at zero.
Note: sqrt(x) = sqrt(eps) = 3e-4 when x < eps = 1.19e-7.
Args:
x: The operand.
eps: A small number to prevent NaNs.
value_at_zero: The value to clamp x to near zero. The return value will be
sqrt(value_at_zero)
Returns:
The sqrt(x), or sqrt(value_at_zero) near zero.
"""
safe_x = jnp.where(x > eps, x, jnp.full_like(x, value_at_zero))
return jnp.sqrt(safe_x)
def safe_acos(t,
eps = jnp.finfo(jnp.float32).eps):
"""A safe version of arccos which avoids evaluating at -1 or 1."""
return jnp.arccos(jnp.clip(t, -1.0 + eps, 1.0 - eps))
def safe_log(x,
*,
eps = jnp.finfo(jnp.float32).eps,
value_at_zero = jnp.finfo(jnp.float32).eps):
"""Computes a safe log that avoids evaluating at zero.
Args:
x: Input array.
eps: A small number to prevent NaNs.
value_at_zero: The value to clamp x to near zero. The return value will be
      log(value_at_zero)
Returns:
log(x) or log(value_at_zero) near zero.
"""
safe_x = jnp.where(x > eps, x, jnp.full_like(x, value_at_zero))
return jnp.log(safe_x)
def normalize(
x,
axis = -1,
# pylint: disable=redefined-builtin
ord = None,
eps = jnp.finfo(jnp.float32).eps,
):
"""Normalize a vector."""
return x / optax.safe_norm(x, axis=axis, ord=ord, min_norm=eps, keepdims=True)
def inv_sqrtm(
matrix,
normalize_eigvals = False,
):
"""Takes the inverse matrix square root of a PSD matrix.
Forked from `coord.sqrtm`.
Args:
matrix: (..., d, d) A positive semi-definite matrix.
normalize_eigvals: If True, normalize the eigenvalues by the geometric mean.
Returns:
The inverse square root of the matrix, and (eigvec, eigval) if return_eigs
is True.
"""
eigvec, eigval = jax.lax.linalg.eigh(
matrix, symmetrize_input=False, sort_eigenvalues=False)
if normalize_eigvals:
# Equivalent to dividing by geometric mean, but numerically stabler.
log_eigval = jnp.log(eigval)
eigval = jnp.exp(log_eigval - jnp.mean(log_eigval, axis=-1, keepdims=True))
scaling = math.safe_div(1, math.safe_sqrt(eigval))
scaling = scaling[Ellipsis, None, :]
sqrtm_mat = matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1))
return sqrtm_mat, (eigvec, eigval)
def to_homogeneous(v):
"""Converts a vector to a homogeneous representation.
Args:
v: (*, C) A non-homogeneous vector.
Returns:
(*, C+1) A homogeneous version of v.
"""
return jnp.concatenate([v, jnp.ones_like(v[Ellipsis, :1])], axis=-1)
def from_homogeneous(v):
"""Converts a homogeneous vector to a non-homogeneous vector.
Args:
v: (*, C+1) A homogeneous vector.
Returns:
(*, C) The non-homogeneous version of v.
"""
return v[Ellipsis, :-1] / v[Ellipsis, -1:]
def apply_homogeneous_transform(transform,
vectors):
"""Apply a homogeneous transformation to a collection of vectors.
Args:
transform: (C+1,C+1) A homogeneous transformation matrix.
vectors: (*,C) An array containing 3D points.
Returns:
(*,C) The points transformed by the array.
"""
vectors_h = to_homogeneous(vectors.reshape((-1, vectors.shape[-1])))
transformed = from_homogeneous(matmul(transform, vectors_h.T).T)
return transformed.reshape(vectors.shape)
def generalized_bias_and_gain(x, slope,
threshold):
"""Maps<fim_suffix><fim_middle> the input according to the generalized bias and gain function.
References:
https://arxiv.org/abs/2010.09714
Args:
x: The inputs array with values in [0, 1] to map.
slope: The slope parameter of the curve which controls the slope of the
curve at the threshold.
threshold: The value at which `x` reverses its shape, and the point at which
the output is guaranteed to be equal to the input.
Returns:
The output of the curve at each input point `x`.
"""
|
the input according to the generalized bias and gain function.
References:
https://arxiv.org/abs/2010.09714
Args:
x: The inputs array with values in [0, 1] to map.
slope: The slope parameter of the curve which controls the slope of the
curve at the threshold.
threshold: The value at which `x` reverses its shape, and the point at which
the output is guaranteed to be equal to the input.
Returns:
The output of the curve at each input point `x`.
"""
|
BLOCK_COMMENT
|
prefix_full_suffix_empty_complete_current_block_no_evidence
|
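A small usage sketch for the homogeneous-transform helpers above; the `internal.spin_math` import assumes the camp_zipnerf package layout shown in this row:

import jax.numpy as jnp
from internal import spin_math  # assumption: run from the camp_zipnerf source tree

# A 4x4 homogeneous transform that translates points by (1, 2, 3).
transform = jnp.eye(4).at[:3, 3].set(jnp.array([1.0, 2.0, 3.0]))
points = jnp.array([[0.0, 0.0, 0.0],
                    [1.0, 1.0, 1.0]])

moved = spin_math.apply_homogeneous_transform(transform, points)
# moved == [[1., 2., 3.], [2., 3., 4.]]

# The "safe" ops clamp inputs below eps to value_at_zero before evaluating.
assert float(spin_math.safe_sqrt(jnp.array(0.0))) == 0.0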